Update Linux RS prebuilts for LLVM rebase to r275480

Bug: http://b/31320715

Test: Build AOSP and run RenderScript tests (host tests for slang and
libbcc, RsTest, CTS)

Change-Id: I27b54c2922f34923d204c8a339a90b9682fd4456
diff --git a/renderscript/clang-include/__clang_cuda_cmath.h b/renderscript/clang-include/__clang_cuda_cmath.h
new file mode 100644
index 0000000..ae7ff2f
--- /dev/null
+++ b/renderscript/clang-include/__clang_cuda_cmath.h
@@ -0,0 +1,148 @@
+/*===---- __clang_cuda_cmath.h - Device-side CUDA cmath support ------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_CMATH_H__
+#define __CLANG_CUDA_CMATH_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+// CUDA lets us use various std math functions on the device side.  This file
+// works in concert with __clang_cuda_math_forward_declares.h to make this work.
+//
+// Specifically, the forward-declares header declares __device__ overloads for
+// these functions in the global namespace, then pulls them into namespace std
+// with 'using' statements.  Then this file implements those functions, after
+// the implementations have been pulled in.
+//
+// It's important that we declare the functions in the global namespace and pull
+// them into namespace std with using statements, as opposed to simply declaring
+// these functions in namespace std, because our device functions need to
+// overload the standard library functions, which may be declared in the global
+// namespace or in std, depending on the degree of conformance of the stdlib
+// implementation.  Declaring in the global namespace and pulling into namespace
+// std covers all of the known knowns.
+
+#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
+
+__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
+__DEVICE__ long abs(long __n) { return ::labs(__n); }
+__DEVICE__ float abs(float __x) { return ::fabsf(__x); }
+__DEVICE__ double abs(double __x) { return ::fabs(__x); }
+__DEVICE__ float acos(float __x) { return ::acosf(__x); }
+__DEVICE__ float asin(float __x) { return ::asinf(__x); }
+__DEVICE__ float atan(float __x) { return ::atanf(__x); }
+__DEVICE__ float atan2(float __x, float __y) { return ::atan2f(__x, __y); }
+__DEVICE__ float ceil(float __x) { return ::ceilf(__x); }
+__DEVICE__ float cos(float __x) { return ::cosf(__x); }
+__DEVICE__ float cosh(float __x) { return ::coshf(__x); }
+__DEVICE__ float exp(float __x) { return ::expf(__x); }
+__DEVICE__ float fabs(float __x) { return ::fabsf(__x); }
+__DEVICE__ float floor(float __x) { return ::floorf(__x); }
+__DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); }
+__DEVICE__ int fpclassify(float __x) {
+  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+                              FP_ZERO, __x);
+}
+__DEVICE__ int fpclassify(double __x) {
+  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+                              FP_ZERO, __x);
+}
+__DEVICE__ float frexp(float __arg, int *__exp) {
+  return ::frexpf(__arg, __exp);
+}
+__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
+__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
+__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
+__DEVICE__ bool isfinite(double __x) { return ::__finite(__x); }
+__DEVICE__ bool isgreater(float __x, float __y) {
+  return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreater(double __x, double __y) {
+  return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(float __x, float __y) {
+  return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(double __x, double __y) {
+  return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isless(float __x, float __y) {
+  return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool isless(double __x, double __y) {
+  return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool islessequal(float __x, float __y) {
+  return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessequal(double __x, double __y) {
+  return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessgreater(float __x, float __y) {
+  return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool islessgreater(double __x, double __y) {
+  return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
+__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isunordered(float __x, float __y) {
+  return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ bool isunordered(double __x, double __y) {
+  return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ float ldexp(float __arg, int __exp) {
+  return ::ldexpf(__arg, __exp);
+}
+__DEVICE__ float log(float __x) { return ::logf(__x); }
+__DEVICE__ float log10(float __x) { return ::log10f(__x); }
+__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); }
+__DEVICE__ float nexttoward(float __from, float __to) {
+  return __builtin_nexttowardf(__from, __to);
+}
+__DEVICE__ double nexttoward(double __from, double __to) {
+  return __builtin_nexttoward(__from, __to);
+}
+__DEVICE__ float pow(float __base, float __exp) {
+  return ::powf(__base, __exp);
+}
+__DEVICE__ float pow(float __base, int __iexp) {
+  return ::powif(__base, __iexp);
+}
+__DEVICE__ double pow(double __base, int __iexp) {
+  return ::powi(__base, __iexp);
+}
+__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); }
+__DEVICE__ bool signbit(double __x) { return ::__signbit(__x); }
+__DEVICE__ float sin(float __x) { return ::sinf(__x); }
+__DEVICE__ float sinh(float __x) { return ::sinhf(__x); }
+__DEVICE__ float sqrt(float __x) { return ::sqrtf(__x); }
+__DEVICE__ float tan(float __x) { return ::tanf(__x); }
+__DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
+
+#undef __DEVICE__
+
+#endif
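
The overload machinery described in the comment at the top of this header is easiest to see from the caller's side. A minimal sketch, assuming clang's CUDA compilation mode with these wrapper headers force-included (the kernel below is hypothetical and not part of this change):

    #include <cmath>

    // std::sqrt(float) resolves to the __device__ float overload pulled into
    // namespace std, so the device code calls ::sqrtf rather than a
    // double-precision libm routine.
    __global__ void normalize(float *v, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n)
        v[i] = v[i] / std::sqrt(v[i] * v[i] + 1.0f);
    }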
diff --git a/renderscript/clang-include/__clang_cuda_intrinsics.h b/renderscript/clang-include/__clang_cuda_intrinsics.h
new file mode 100644
index 0000000..3df41fa
--- /dev/null
+++ b/renderscript/clang-include/__clang_cuda_intrinsics.h
@@ -0,0 +1,322 @@
+/*===--- __clang_cuda_intrinsics.h - Device-side CUDA intrinsic wrappers ---===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_INTRINSICS_H__
+#define __CLANG_CUDA_INTRINSICS_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+// sm_30 intrinsics: __shfl_{up,down,xor}.
+
+#define __SM_30_INTRINSICS_H__
+#define __SM_30_INTRINSICS_HPP__
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
+
+#pragma push_macro("__MAKE_SHUFFLES")
+#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask)    \
+  inline __device__ int __FnName(int __in, int __offset,                       \
+                                 int __width = warpSize) {                     \
+    return __IntIntrinsic(__in, __offset,                                      \
+                          ((warpSize - __width) << 8) | (__Mask));             \
+  }                                                                            \
+  inline __device__ float __FnName(float __in, int __offset,                   \
+                                   int __width = warpSize) {                   \
+    return __FloatIntrinsic(__in, __offset,                                    \
+                            ((warpSize - __width) << 8) | (__Mask));           \
+  }                                                                            \
+  inline __device__ unsigned int __FnName(unsigned int __in, int __offset,     \
+                                          int __width = warpSize) {            \
+    return static_cast<unsigned int>(                                          \
+        ::__FnName(static_cast<int>(__in), __offset, __width));                \
+  }                                                                            \
+  inline __device__ long long __FnName(long long __in, int __offset,           \
+                                       int __width = warpSize) {               \
+    struct __Bits {                                                            \
+      int __a, __b;                                                            \
+    };                                                                         \
+    _Static_assert(sizeof(__in) == sizeof(__Bits));                            \
+    _Static_assert(sizeof(__Bits) == 2 * sizeof(int));                         \
+    __Bits __tmp;                                                              \
+    memcpy(&__tmp, &__in, sizeof(__in));                                       \
+    __tmp.__a = ::__FnName(__tmp.__a, __offset, __width);                      \
+    __tmp.__b = ::__FnName(__tmp.__b, __offset, __width);                      \
+    long long __out;                                                           \
+    memcpy(&__out, &__tmp, sizeof(__tmp));                                     \
+    return __out;                                                              \
+  }                                                                            \
+  inline __device__ unsigned long long __FnName(                               \
+      unsigned long long __in, int __offset, int __width = warpSize) {         \
+    return static_cast<unsigned long long>(                                    \
+        ::__FnName(static_cast<long long>(__in), __offset, __width));          \
+  }                                                                            \
+  inline __device__ double __FnName(double __in, int __offset,                 \
+                                    int __width = warpSize) {                  \
+    long long __tmp;                                                           \
+    _Static_assert(sizeof(__tmp) == sizeof(__in));                             \
+    memcpy(&__tmp, &__in, sizeof(__in));                                       \
+    __tmp = ::__FnName(__tmp, __offset, __width);                              \
+    double __out;                                                              \
+    memcpy(&__out, &__tmp, sizeof(__out));                                     \
+    return __out;                                                              \
+  }
+
+__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f);
+// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
+// maxLane.
+__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0);
+__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f);
+__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f);
+
+#pragma pop_macro("__MAKE_SHUFFLES")
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
+
+// sm_32 intrinsics: __ldg and __funnelshift_{l,lc,r,rc}.
+
+// Prevent the vanilla sm_32 intrinsics header from being included.
+#define __SM_32_INTRINSICS_H__
+#define __SM_32_INTRINSICS_HPP__
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
+
+inline __device__ char __ldg(const char *ptr) { return __nvvm_ldg_c(ptr); }
+inline __device__ short __ldg(const short *ptr) { return __nvvm_ldg_s(ptr); }
+inline __device__ int __ldg(const int *ptr) { return __nvvm_ldg_i(ptr); }
+inline __device__ long __ldg(const long *ptr) { return __nvvm_ldg_l(ptr); }
+inline __device__ long long __ldg(const long long *ptr) {
+  return __nvvm_ldg_ll(ptr);
+}
+inline __device__ unsigned char __ldg(const unsigned char *ptr) {
+  return __nvvm_ldg_uc(ptr);
+}
+inline __device__ unsigned short __ldg(const unsigned short *ptr) {
+  return __nvvm_ldg_us(ptr);
+}
+inline __device__ unsigned int __ldg(const unsigned int *ptr) {
+  return __nvvm_ldg_ui(ptr);
+}
+inline __device__ unsigned long __ldg(const unsigned long *ptr) {
+  return __nvvm_ldg_ul(ptr);
+}
+inline __device__ unsigned long long __ldg(const unsigned long long *ptr) {
+  return __nvvm_ldg_ull(ptr);
+}
+inline __device__ float __ldg(const float *ptr) { return __nvvm_ldg_f(ptr); }
+inline __device__ double __ldg(const double *ptr) { return __nvvm_ldg_d(ptr); }
+
+inline __device__ char2 __ldg(const char2 *ptr) {
+  typedef char c2 __attribute__((ext_vector_type(2)));
+  // We can assume that ptr is aligned at least to char2's alignment, but the
+  // load will assume that ptr is aligned to c2's alignment.  This is only
+  // safe if alignof(c2) <= alignof(char2).
+  c2 rv = __nvvm_ldg_c2(reinterpret_cast<const c2 *>(ptr));
+  char2 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  return ret;
+}
+inline __device__ char4 __ldg(const char4 *ptr) {
+  typedef char c4 __attribute__((ext_vector_type(4)));
+  c4 rv = __nvvm_ldg_c4(reinterpret_cast<const c4 *>(ptr));
+  char4 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  ret.z = rv[2];
+  ret.w = rv[3];
+  return ret;
+}
+inline __device__ short2 __ldg(const short2 *ptr) {
+  typedef short s2 __attribute__((ext_vector_type(2)));
+  s2 rv = __nvvm_ldg_s2(reinterpret_cast<const s2 *>(ptr));
+  short2 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  return ret;
+}
+inline __device__ short4 __ldg(const short4 *ptr) {
+  typedef short s4 __attribute__((ext_vector_type(4)));
+  s4 rv = __nvvm_ldg_s4(reinterpret_cast<const s4 *>(ptr));
+  short4 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  ret.z = rv[2];
+  ret.w = rv[3];
+  return ret;
+}
+inline __device__ int2 __ldg(const int2 *ptr) {
+  typedef int i2 __attribute__((ext_vector_type(2)));
+  i2 rv = __nvvm_ldg_i2(reinterpret_cast<const i2 *>(ptr));
+  int2 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  return ret;
+}
+inline __device__ int4 __ldg(const int4 *ptr) {
+  typedef int i4 __attribute__((ext_vector_type(4)));
+  i4 rv = __nvvm_ldg_i4(reinterpret_cast<const i4 *>(ptr));
+  int4 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  ret.z = rv[2];
+  ret.w = rv[3];
+  return ret;
+}
+inline __device__ longlong2 __ldg(const longlong2 *ptr) {
+  typedef long long ll2 __attribute__((ext_vector_type(2)));
+  ll2 rv = __nvvm_ldg_ll2(reinterpret_cast<const ll2 *>(ptr));
+  longlong2 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  return ret;
+}
+
+inline __device__ uchar2 __ldg(const uchar2 *ptr) {
+  typedef unsigned char uc2 __attribute__((ext_vector_type(2)));
+  uc2 rv = __nvvm_ldg_uc2(reinterpret_cast<const uc2 *>(ptr));
+  uchar2 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  return ret;
+}
+inline __device__ uchar4 __ldg(const uchar4 *ptr) {
+  typedef unsigned char uc4 __attribute__((ext_vector_type(4)));
+  uc4 rv = __nvvm_ldg_uc4(reinterpret_cast<const uc4 *>(ptr));
+  uchar4 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  ret.z = rv[2];
+  ret.w = rv[3];
+  return ret;
+}
+inline __device__ ushort2 __ldg(const ushort2 *ptr) {
+  typedef unsigned short us2 __attribute__((ext_vector_type(2)));
+  us2 rv = __nvvm_ldg_us2(reinterpret_cast<const us2 *>(ptr));
+  ushort2 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  return ret;
+}
+inline __device__ ushort4 __ldg(const ushort4 *ptr) {
+  typedef unsigned short us4 __attribute__((ext_vector_type(4)));
+  us4 rv = __nvvm_ldg_us4(reinterpret_cast<const us4 *>(ptr));
+  ushort4 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  ret.z = rv[2];
+  ret.w = rv[3];
+  return ret;
+}
+inline __device__ uint2 __ldg(const uint2 *ptr) {
+  typedef unsigned int ui2 __attribute__((ext_vector_type(2)));
+  ui2 rv = __nvvm_ldg_ui2(reinterpret_cast<const ui2 *>(ptr));
+  uint2 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  return ret;
+}
+inline __device__ uint4 __ldg(const uint4 *ptr) {
+  typedef unsigned int ui4 __attribute__((ext_vector_type(4)));
+  ui4 rv = __nvvm_ldg_ui4(reinterpret_cast<const ui4 *>(ptr));
+  uint4 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  ret.z = rv[2];
+  ret.w = rv[3];
+  return ret;
+}
+inline __device__ ulonglong2 __ldg(const ulonglong2 *ptr) {
+  typedef unsigned long long ull2 __attribute__((ext_vector_type(2)));
+  ull2 rv = __nvvm_ldg_ull2(reinterpret_cast<const ull2 *>(ptr));
+  ulonglong2 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  return ret;
+}
+
+inline __device__ float2 __ldg(const float2 *ptr) {
+  typedef float f2 __attribute__((ext_vector_type(2)));
+  f2 rv = __nvvm_ldg_f2(reinterpret_cast<const f2 *>(ptr));
+  float2 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  return ret;
+}
+inline __device__ float4 __ldg(const float4 *ptr) {
+  typedef float f4 __attribute__((ext_vector_type(4)));
+  f4 rv = __nvvm_ldg_f4(reinterpret_cast<const f4 *>(ptr));
+  float4 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  ret.z = rv[2];
+  ret.w = rv[3];
+  return ret;
+}
+inline __device__ double2 __ldg(const double2 *ptr) {
+  typedef double d2 __attribute__((ext_vector_type(2)));
+  d2 rv = __nvvm_ldg_d2(reinterpret_cast<const d2 *>(ptr));
+  double2 ret;
+  ret.x = rv[0];
+  ret.y = rv[1];
+  return ret;
+}
+
+// TODO: Implement these as intrinsics, so the backend can work its magic on
+// these.  Alternatively, we could implement these as plain C and try to get
+// llvm to recognize the relevant patterns.
+inline __device__ unsigned __funnelshift_l(unsigned low32, unsigned high32,
+                                           unsigned shiftWidth) {
+  unsigned result;
+  asm("shf.l.wrap.b32 %0, %1, %2, %3;"
+      : "=r"(result)
+      : "r"(low32), "r"(high32), "r"(shiftWidth));
+  return result;
+}
+inline __device__ unsigned __funnelshift_lc(unsigned low32, unsigned high32,
+                                            unsigned shiftWidth) {
+  unsigned result;
+  asm("shf.l.clamp.b32 %0, %1, %2, %3;"
+      : "=r"(result)
+      : "r"(low32), "r"(high32), "r"(shiftWidth));
+  return result;
+}
+inline __device__ unsigned __funnelshift_r(unsigned low32, unsigned high32,
+                                           unsigned shiftWidth) {
+  unsigned result;
+  asm("shf.r.wrap.b32 %0, %1, %2, %3;"
+      : "=r"(result)
+      : "r"(low32), "r"(high32), "r"(shiftWidth));
+  return result;
+}
+inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32,
+                                            unsigned shiftWidth) {
+  unsigned ret;
+  asm("shf.r.clamp.b32 %0, %1, %2, %3;"
+      : "=r"(ret)
+      : "r"(low32), "r"(high32), "r"(shiftWidth));
+  return ret;
+}
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
+
+#endif // defined(__CLANG_CUDA_INTRINSICS_H__)
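
The shuffle wrappers generated by __MAKE_SHUFFLES above are typically used for intra-warp data exchange. A minimal sketch of a warp-wide sum reduction built on __shfl_down, assuming a full 32-lane warp on sm_30 or newer (the function below is hypothetical and not part of this change):

    // After the loop, lane 0 holds the sum of `val` across the warp; each
    // iteration adds the value held by the lane `offset` positions higher.
    __device__ float warp_sum(float val) {
      for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down(val, offset);
      return val;
    }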
diff --git a/renderscript/clang-include/__clang_cuda_math_forward_declares.h b/renderscript/clang-include/__clang_cuda_math_forward_declares.h
new file mode 100644
index 0000000..3f2834d
--- /dev/null
+++ b/renderscript/clang-include/__clang_cuda_math_forward_declares.h
@@ -0,0 +1,263 @@
+/*===- __clang_math_forward_declares.h - Prototypes of __device__ math fns --===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
+#define __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+// This file forward-declares some math functions we (or the CUDA headers)
+// will define later.  We need to do this, and do it before cmath is included,
+// because the standard library may have constexpr math functions.  In the
+// absence of a prior __device__ decl, those constexpr functions may become
+// implicitly host+device.  host+device functions can't be overloaded, so that
+// would preclude the use of our own __device__ overloads for these functions.
+
+#pragma push_macro("__DEVICE__")
+#define __DEVICE__                                                             \
+  static __inline__ __attribute__((always_inline)) __attribute__((device))
+
+__DEVICE__ double abs(double);
+__DEVICE__ float abs(float);
+__DEVICE__ int abs(int);
+__DEVICE__ long abs(long);
+__DEVICE__ long long abs(long long);
+__DEVICE__ double acos(double);
+__DEVICE__ float acos(float);
+__DEVICE__ double acosh(double);
+__DEVICE__ float acosh(float);
+__DEVICE__ double asin(double);
+__DEVICE__ float asin(float);
+__DEVICE__ double asinh(double);
+__DEVICE__ float asinh(float);
+__DEVICE__ double atan2(double, double);
+__DEVICE__ float atan2(float, float);
+__DEVICE__ double atan(double);
+__DEVICE__ float atan(float);
+__DEVICE__ double atanh(double);
+__DEVICE__ float atanh(float);
+__DEVICE__ double cbrt(double);
+__DEVICE__ float cbrt(float);
+__DEVICE__ double ceil(double);
+__DEVICE__ float ceil(float);
+__DEVICE__ double copysign(double, double);
+__DEVICE__ float copysign(float, float);
+__DEVICE__ double cos(double);
+__DEVICE__ float cos(float);
+__DEVICE__ double cosh(double);
+__DEVICE__ float cosh(float);
+__DEVICE__ double erfc(double);
+__DEVICE__ float erfc(float);
+__DEVICE__ double erf(double);
+__DEVICE__ float erf(float);
+__DEVICE__ double exp2(double);
+__DEVICE__ float exp2(float);
+__DEVICE__ double exp(double);
+__DEVICE__ float exp(float);
+__DEVICE__ double expm1(double);
+__DEVICE__ float expm1(float);
+__DEVICE__ double fabs(double);
+__DEVICE__ float fabs(float);
+__DEVICE__ double fdim(double, double);
+__DEVICE__ float fdim(float, float);
+__DEVICE__ double floor(double);
+__DEVICE__ float floor(float);
+__DEVICE__ double fma(double, double, double);
+__DEVICE__ float fma(float, float, float);
+__DEVICE__ double fmax(double, double);
+__DEVICE__ float fmax(float, float);
+__DEVICE__ double fmin(double, double);
+__DEVICE__ float fmin(float, float);
+__DEVICE__ double fmod(double, double);
+__DEVICE__ float fmod(float, float);
+__DEVICE__ int fpclassify(double);
+__DEVICE__ int fpclassify(float);
+__DEVICE__ double frexp(double, int *);
+__DEVICE__ float frexp(float, int *);
+__DEVICE__ double hypot(double, double);
+__DEVICE__ float hypot(float, float);
+__DEVICE__ int ilogb(double);
+__DEVICE__ int ilogb(float);
+__DEVICE__ bool isfinite(double);
+__DEVICE__ bool isfinite(float);
+__DEVICE__ bool isgreater(double, double);
+__DEVICE__ bool isgreaterequal(double, double);
+__DEVICE__ bool isgreaterequal(float, float);
+__DEVICE__ bool isgreater(float, float);
+__DEVICE__ bool isinf(double);
+__DEVICE__ bool isinf(float);
+__DEVICE__ bool isless(double, double);
+__DEVICE__ bool islessequal(double, double);
+__DEVICE__ bool islessequal(float, float);
+__DEVICE__ bool isless(float, float);
+__DEVICE__ bool islessgreater(double, double);
+__DEVICE__ bool islessgreater(float, float);
+__DEVICE__ bool isnan(double);
+__DEVICE__ bool isnan(float);
+__DEVICE__ bool isnormal(double);
+__DEVICE__ bool isnormal(float);
+__DEVICE__ bool isunordered(double, double);
+__DEVICE__ bool isunordered(float, float);
+__DEVICE__ long labs(long);
+__DEVICE__ double ldexp(double, int);
+__DEVICE__ float ldexp(float, int);
+__DEVICE__ double lgamma(double);
+__DEVICE__ float lgamma(float);
+__DEVICE__ long long llabs(long long);
+__DEVICE__ long long llrint(double);
+__DEVICE__ long long llrint(float);
+__DEVICE__ double log10(double);
+__DEVICE__ float log10(float);
+__DEVICE__ double log1p(double);
+__DEVICE__ float log1p(float);
+__DEVICE__ double log2(double);
+__DEVICE__ float log2(float);
+__DEVICE__ double logb(double);
+__DEVICE__ float logb(float);
+__DEVICE__ double log(double);
+__DEVICE__ float log(float);
+__DEVICE__ long lrint(double);
+__DEVICE__ long lrint(float);
+__DEVICE__ long lround(double);
+__DEVICE__ long lround(float);
+__DEVICE__ double modf(double, double *);
+__DEVICE__ float modf(float, float *);
+__DEVICE__ double nan(const char *);
+__DEVICE__ float nanf(const char *);
+__DEVICE__ double nearbyint(double);
+__DEVICE__ float nearbyint(float);
+__DEVICE__ double nextafter(double, double);
+__DEVICE__ float nextafter(float, float);
+__DEVICE__ double nexttoward(double, double);
+__DEVICE__ float nexttoward(float, float);
+__DEVICE__ double pow(double, double);
+__DEVICE__ double pow(double, int);
+__DEVICE__ float pow(float, float);
+__DEVICE__ float pow(float, int);
+__DEVICE__ double remainder(double, double);
+__DEVICE__ float remainder(float, float);
+__DEVICE__ double remquo(double, double, int *);
+__DEVICE__ float remquo(float, float, int *);
+__DEVICE__ double rint(double);
+__DEVICE__ float rint(float);
+__DEVICE__ double round(double);
+__DEVICE__ float round(float);
+__DEVICE__ double scalbln(double, long);
+__DEVICE__ float scalbln(float, long);
+__DEVICE__ double scalbn(double, int);
+__DEVICE__ float scalbn(float, int);
+__DEVICE__ bool signbit(double);
+__DEVICE__ bool signbit(float);
+__DEVICE__ double sin(double);
+__DEVICE__ float sin(float);
+__DEVICE__ double sinh(double);
+__DEVICE__ float sinh(float);
+__DEVICE__ double sqrt(double);
+__DEVICE__ float sqrt(float);
+__DEVICE__ double tan(double);
+__DEVICE__ float tan(float);
+__DEVICE__ double tanh(double);
+__DEVICE__ float tanh(float);
+__DEVICE__ double tgamma(double);
+__DEVICE__ float tgamma(float);
+__DEVICE__ double trunc(double);
+__DEVICE__ float trunc(float);
+
+namespace std {
+using ::abs;
+using ::acos;
+using ::acosh;
+using ::asin;
+using ::asinh;
+using ::atan;
+using ::atan2;
+using ::atanh;
+using ::cbrt;
+using ::ceil;
+using ::copysign;
+using ::cos;
+using ::cosh;
+using ::erf;
+using ::erfc;
+using ::exp;
+using ::exp2;
+using ::expm1;
+using ::fabs;
+using ::fdim;
+using ::floor;
+using ::fma;
+using ::fmax;
+using ::fmin;
+using ::fmod;
+using ::fpclassify;
+using ::frexp;
+using ::hypot;
+using ::ilogb;
+using ::isfinite;
+using ::isgreater;
+using ::isgreaterequal;
+using ::isinf;
+using ::isless;
+using ::islessequal;
+using ::islessgreater;
+using ::isnan;
+using ::isnormal;
+using ::isunordered;
+using ::labs;
+using ::ldexp;
+using ::lgamma;
+using ::llabs;
+using ::llrint;
+using ::log;
+using ::log10;
+using ::log1p;
+using ::log2;
+using ::logb;
+using ::lrint;
+using ::lround;
+using ::modf;
+using ::nan;
+using ::nanf;
+using ::nearbyint;
+using ::nextafter;
+using ::nexttoward;
+using ::pow;
+using ::remainder;
+using ::remquo;
+using ::rint;
+using ::round;
+using ::scalbln;
+using ::scalbn;
+using ::signbit;
+using ::sin;
+using ::sinh;
+using ::sqrt;
+using ::tan;
+using ::tanh;
+using ::tgamma;
+using ::trunc;
+} // namespace std
+
+#pragma pop_macro("__DEVICE__")
+
+#endif
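
The declare-then-using pattern this header applies to every math function can be reduced to two lines. A minimal sketch with a hypothetical function name (`frob` is illustrative only):

    __device__ float frob(float);    // device overload, declared in the global namespace
    namespace std { using ::frob; }  // pulled into std so std::frob(1.0f) can select it

Because the __device__ declaration is seen before <cmath>, a constexpr host declaration of the same function cannot become implicitly host+device and shadow or conflict with this overload.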
diff --git a/renderscript/clang-include/__clang_cuda_runtime_wrapper.h b/renderscript/clang-include/__clang_cuda_runtime_wrapper.h
index 8e5f033..6445f9b 100644
--- a/renderscript/clang-include/__clang_cuda_runtime_wrapper.h
+++ b/renderscript/clang-include/__clang_cuda_runtime_wrapper.h
@@ -42,10 +42,14 @@
 
 #if defined(__CUDA__) && defined(__clang__)
 
+// Include some forward declares that must come before cmath.
+#include <__clang_cuda_math_forward_declares.h>
+
 // Include some standard headers to avoid CUDA headers including them
 // while some required macros (like __THROW) are in a weird state.
-#include <stdlib.h>
 #include <cmath>
+#include <cstdlib>
+#include <stdlib.h>
 
 // Preserve common macros that will be changed below by us or by CUDA
 // headers.
@@ -79,17 +83,15 @@
 // definitions from .hpp files.
 #define __DEVICE_FUNCTIONS_H__
 #define __MATH_FUNCTIONS_H__
+#define __COMMON_FUNCTIONS_H__
 
 #undef __CUDACC__
 #define __CUDABE__
 // Disables definitions of device-side runtime support stubs in
 // cuda_device_runtime_api.h
-#define __CUDADEVRT_INTERNAL__
+#include "driver_types.h"
 #include "host_config.h"
 #include "host_defines.h"
-#include "driver_types.h"
-#include "common_functions.h"
-#undef __CUDADEVRT_INTERNAL__
 
 #undef __CUDABE__
 #define __CUDACC__
@@ -100,11 +102,11 @@
 
 // CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does
 // not have at the moment. Emulate them with a builtin memcpy/memset.
-#define __nvvm_memcpy(s,d,n,a) __builtin_memcpy(s,d,n)
-#define __nvvm_memset(d,c,n,a) __builtin_memset(d,c,n)
+#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n)
+#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n)
 
-#include "crt/host_runtime.h"
 #include "crt/device_runtime.h"
+#include "crt/host_runtime.h"
 // device_runtime.h defines __cxa_* macros that will conflict with
 // cxxabi.h.
 // FIXME: redefine these as __device__ functions.
@@ -140,7 +142,20 @@
 #pragma push_macro("__forceinline__")
 #define __forceinline__ __device__ __inline__ __attribute__((always_inline))
 #include "device_functions.hpp"
+
+// math_functions.hpp uses the __USE_FAST_MATH__ macro to determine whether we
+// get the slow-but-accurate or fast-but-inaccurate versions of functions like
+// sin and exp.  This is controlled in clang by -fcuda-approx-transcendentals.
+//
+// device_functions.hpp uses __USE_FAST_MATH__ for a different purpose (fast vs.
+// slow divides), so we need to scope our define carefully here.
+#pragma push_macro("__USE_FAST_MATH__")
+#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#define __USE_FAST_MATH__
+#endif
 #include "math_functions.hpp"
+#pragma pop_macro("__USE_FAST_MATH__")
+
 #include "math_functions_dbl_ptx3.hpp"
 #pragma pop_macro("__forceinline__")
 
@@ -152,21 +167,21 @@
 // Alas, additional overloads for these functions are hard to get to.
 // Considering that we only need these overloads for a few functions,
 // we can provide them here.
-static inline float rsqrt(float a) { return rsqrtf(a); }
-static inline float rcbrt(float a) { return rcbrtf(a); }
-static inline float sinpi(float a) { return sinpif(a); }
-static inline float cospi(float a) { return cospif(a); }
-static inline void sincospi(float a, float *b, float *c) {
-  return sincospi(a, b, c);
+static inline float rsqrt(float __a) { return rsqrtf(__a); }
+static inline float rcbrt(float __a) { return rcbrtf(__a); }
+static inline float sinpi(float __a) { return sinpif(__a); }
+static inline float cospi(float __a) { return cospif(__a); }
+static inline void sincospi(float __a, float *__b, float *__c) {
+  return sincospif(__a, __b, __c);
 }
-static inline float erfcinv(float a) { return erfcinvf(a); }
-static inline float normcdfinv(float a) { return normcdfinvf(a); }
-static inline float normcdf(float a) { return normcdff(a); }
-static inline float erfcx(float a) { return erfcxf(a); }
+static inline float erfcinv(float __a) { return erfcinvf(__a); }
+static inline float normcdfinv(float __a) { return normcdfinvf(__a); }
+static inline float normcdf(float __a) { return normcdff(__a); }
+static inline float erfcx(float __a) { return erfcxf(__a); }
 
 // For some reason single-argument variant is not always declared by
 // CUDA headers. Alas, device_functions.hpp included below needs it.
-static inline __device__ void __brkpt(int c) { __brkpt(); }
+static inline __device__ void __brkpt(int __c) { __brkpt(); }
 
 // Now include *.hpp with definitions of various GPU functions.  Alas,
 // a lot of things get declared/defined with __host__ attribute which
@@ -178,17 +193,34 @@
 #undef __CUDABE__
 #define __CUDACC__
 #undef __DEVICE_FUNCTIONS_HPP__
-#include "device_functions.hpp"
 #include "device_atomic_functions.hpp"
+#include "device_functions.hpp"
 #include "sm_20_atomic_functions.hpp"
-#include "sm_32_atomic_functions.hpp"
 #include "sm_20_intrinsics.hpp"
-// sm_30_intrinsics.h has declarations that use default argument, so
-// we have to include it and it will in turn include .hpp
-#include "sm_30_intrinsics.h"
-#include "sm_32_intrinsics.hpp"
+#include "sm_32_atomic_functions.hpp"
+
+// Don't include sm_30_intrinsics.h and sm_32_intrinsics.h.  These define the
+// __shfl and __ldg intrinsics using inline (volatile) asm, but we want to
+// define them using builtins so that the optimizer can reason about and across
+// these instructions.  In particular, using intrinsics for ldg gets us the
+// [addr+imm] addressing mode, which, although it doesn't actually exist in the
+// hardware, seems to generate faster machine code because ptxas can more easily
+// reason about our code.
+
 #undef __MATH_FUNCTIONS_HPP__
+
+// math_functions.hpp defines ::signbit as a __host__ __device__ function.  This
+// conflicts with libstdc++'s constexpr ::signbit, so we have to rename
+// math_functions.hpp's ::signbit.  It's guarded by #undef signbit, but that's
+// conditional on __GNUC__.  :)
+#pragma push_macro("signbit")
+#pragma push_macro("__GNUC__")
+#undef __GNUC__
+#define signbit __ignored_cuda_signbit
 #include "math_functions.hpp"
+#pragma pop_macro("__GNUC__")
+#pragma pop_macro("signbit")
+
 #pragma pop_macro("__host__")
 
 #include "texture_indirect_functions.h"
@@ -200,17 +232,85 @@
 // Set up compiler macros expected to be seen during compilation.
 #undef __CUDABE__
 #define __CUDACC__
-#define __NVCC__
 
-#if defined(__CUDA_ARCH__)
-// We need to emit IR declaration for non-existing __nvvm_reflect() to
-// let backend know that it should be treated as const nothrow
-// function which is what NVVMReflect pass expects to see.
-extern "C" __device__ __attribute__((const)) int __nvvm_reflect(const void *);
-static __device__ __attribute__((used)) int __nvvm_reflect_anchor() {
-  return __nvvm_reflect("NONE");
+extern "C" {
+// Device-side CUDA system calls.
+// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls
+// We need these declarations and wrappers for device-side
+// malloc/free/printf calls to work without relying on
+// -fcuda-disable-target-call-checks option.
+__device__ int vprintf(const char *, const char *);
+__device__ void free(void *) __attribute((nothrow));
+__device__ void *malloc(size_t) __attribute((nothrow)) __attribute__((malloc));
+__device__ void __assertfail(const char *__message, const char *__file,
+                             unsigned __line, const char *__function,
+                             size_t __charSize) __attribute__((noreturn));
+
+// In order for standard assert() macro on linux to work we need to
+// provide device-side __assert_fail()
+__device__ static inline void __assert_fail(const char *__message,
+                                            const char *__file, unsigned __line,
+                                            const char *__function) {
+  __assertfail(__message, __file, __line, __function, sizeof(char));
 }
-#endif
+
+// Clang will convert printf into vprintf, but we still need
+// device-side declaration for it.
+__device__ int printf(const char *, ...);
+} // extern "C"
+
+// We also need device-side std::malloc and std::free.
+namespace std {
+__device__ static inline void free(void *__ptr) { ::free(__ptr); }
+__device__ static inline void *malloc(size_t __size) {
+  return ::malloc(__size);
+}
+} // namespace std
+
+// Out-of-line implementations from cuda_builtin_vars.h.  These need to come
+// after we've pulled in the definition of uint3 and dim3.
+
+__device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {
+  uint3 ret;
+  ret.x = x;
+  ret.y = y;
+  ret.z = z;
+  return ret;
+}
+
+__device__ inline __cuda_builtin_blockIdx_t::operator uint3() const {
+  uint3 ret;
+  ret.x = x;
+  ret.y = y;
+  ret.z = z;
+  return ret;
+}
+
+__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
+  return dim3(x, y, z);
+}
+
+__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
+  return dim3(x, y, z);
+}
+
+#include <__clang_cuda_cmath.h>
+#include <__clang_cuda_intrinsics.h>
+
+// curand_mtgp32_kernel helpfully redeclares blockDim and threadIdx in host
+// mode, giving them their "proper" types of dim3 and uint3.  This is
+// incompatible with the types we give in cuda_builtin_vars.h.  As a hack,
+// force-include the header (nvcc doesn't include it by default) but redefine
+// dim3 and uint3 to our builtin types.  (Thankfully dim3 and uint3 are only
+// used here for the redeclarations of blockDim and threadIdx.)
+#pragma push_macro("dim3")
+#pragma push_macro("uint3")
+#define dim3 __cuda_builtin_blockDim_t
+#define uint3 __cuda_builtin_threadIdx_t
+#include "curand_mtgp32_kernel.h"
+#pragma pop_macro("dim3")
+#pragma pop_macro("uint3")
+#pragma pop_macro("__USE_FAST_MATH__")
 
 #endif // __CUDA__
 #endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__
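
The extern "C" declarations added above are what make device-side printf, malloc/free, and assert usable from clang-compiled CUDA code. A minimal sketch, assuming a build along the lines of `clang++ -x cuda --cuda-gpu-arch=sm_35` (the kernel below is hypothetical and not part of this change):

    #include <cassert>
    #include <cstdio>
    #include <cstdlib>

    __global__ void scratch_demo(int n) {
      assert(n > 0);  // expands to __assert_fail, which forwards to __assertfail
      int *buf = static_cast<int *>(std::malloc(n * sizeof(int)));
      if (buf != nullptr) {
        printf("allocated %d ints\n", n);  // clang lowers printf to vprintf
        std::free(buf);
      }
    }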
diff --git a/renderscript/clang-include/__wmmintrin_aes.h b/renderscript/clang-include/__wmmintrin_aes.h
index 100799e..211518e 100644
--- a/renderscript/clang-include/__wmmintrin_aes.h
+++ b/renderscript/clang-include/__wmmintrin_aes.h
@@ -28,36 +28,121 @@
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes")))
 
+/// \brief Performs a single round of AES encryption using the Equivalent
+///    Inverse Cipher, transforming the state value from the first source
+///    operand using a 128-bit round key value contained in the second source
+///    operand, and writes the result to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VAESENC instruction.
+///
+/// \param __V
+///    A 128-bit integer vector containing the state value.
+/// \param __R
+///    A 128-bit integer vector containing the round key value.
+/// \returns A 128-bit integer vector containing the encrypted value.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_aesenc_si128(__m128i __V, __m128i __R)
 {
-  return (__m128i)__builtin_ia32_aesenc128(__V, __R);
+  return (__m128i)__builtin_ia32_aesenc128((__v2di)__V, (__v2di)__R);
 }
 
+/// \brief Performs the final round of AES encryption using the Equivalent
+///    Inverse Cipher, transforming the state value from the first source
+///    operand using a 128-bit round key value contained in the second source
+///    operand, and writes the result to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VAESENCLAST instruction.
+///
+/// \param __V
+///    A 128-bit integer vector containing the state value.
+/// \param __R
+///    A 128-bit integer vector containing the round key value.
+/// \returns A 128-bit integer vector containing the encrypted value.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_aesenclast_si128(__m128i __V, __m128i __R)
 {
-  return (__m128i)__builtin_ia32_aesenclast128(__V, __R);
+  return (__m128i)__builtin_ia32_aesenclast128((__v2di)__V, (__v2di)__R);
 }
 
+/// \brief Performs a single round of AES decryption using the Equivalent
+///    Inverse Cipher, transforming the state value from the first source
+///    operand using a 128-bit round key value contained in the second source
+///    operand, and writes the result to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VAESDEC instruction.
+///
+/// \param __V
+///    A 128-bit integer vector containing the state value.
+/// \param __R
+///    A 128-bit integer vector containing the round key value.
+/// \returns A 128-bit integer vector containing the decrypted value.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_aesdec_si128(__m128i __V, __m128i __R)
 {
-  return (__m128i)__builtin_ia32_aesdec128(__V, __R);
+  return (__m128i)__builtin_ia32_aesdec128((__v2di)__V, (__v2di)__R);
 }
 
+/// \brief Performs the final round of AES decryption using the Equivalent
+///    Inverse Cipher, transforming the state value from the first source
+///    operand using a 128-bit round key value contained in the second source
+///    operand, and writes the result to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VAESDECLAST instruction.
+///
+/// \param __V
+///    A 128-bit integer vector containing the state value.
+/// \param __R
+///    A 128-bit integer vector containing the round key value.
+/// \returns A 128-bit integer vector containing the decrypted value.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_aesdeclast_si128(__m128i __V, __m128i __R)
 {
-  return (__m128i)__builtin_ia32_aesdeclast128(__V, __R);
+  return (__m128i)__builtin_ia32_aesdeclast128((__v2di)__V, (__v2di)__R);
 }
 
+/// \brief Applies the AES InvMixColumns() transformation to an expanded key
+///    contained in the source operand, and writes the result to the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VAESIMC instruction.
+///
+/// \param __V
+///    A 128-bit integer vector containing the expanded key.
+/// \returns A 128-bit integer vector containing the transformed value.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_aesimc_si128(__m128i __V)
 {
-  return (__m128i)__builtin_ia32_aesimc128(__V);
+  return (__m128i)__builtin_ia32_aesimc128((__v2di)__V);
 }
 
+/// \brief Generates a round key for AES encryption, operating on 128-bit data
+///    specified in the first source operand and using an 8-bit round constant
+///    specified by the second source operand, and writes the result to the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_aeskeygenassist_si128(__m128i C, const int R);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c AESKEYGENASSIST instruction.
+///
+/// \param C
+///    A 128-bit integer vector that is used to generate the AES encryption key.
+/// \param R
+///    An 8-bit round constant used to generate the AES encryption key.
+/// \returns A 128-bit round key for AES encryption.
 #define _mm_aeskeygenassist_si128(C, R) \
   (__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R))
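
The per-round intrinsics documented above compose into a full block encryption in the usual way. A minimal sketch of AES-128 block encryption, assuming `rk[0..10]` already holds the expanded key schedule (key expansion itself would use _mm_aeskeygenassist_si128); build with something like `clang -maes`:

    #include <wmmintrin.h>

    static __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11]) {
      block = _mm_xor_si128(block, rk[0]);         // initial AddRoundKey
      for (int i = 1; i < 10; ++i)
        block = _mm_aesenc_si128(block, rk[i]);    // rounds 1-9
      return _mm_aesenclast_si128(block, rk[10]);  // final round, no MixColumns
    }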
 
diff --git a/renderscript/clang-include/__wmmintrin_pclmul.h b/renderscript/clang-include/__wmmintrin_pclmul.h
index 68e944e..d4e073f 100644
--- a/renderscript/clang-include/__wmmintrin_pclmul.h
+++ b/renderscript/clang-include/__wmmintrin_pclmul.h
@@ -23,6 +23,34 @@
 #ifndef _WMMINTRIN_PCLMUL_H
 #define _WMMINTRIN_PCLMUL_H
 
+/// \brief Multiplies two 64-bit integer values, which are selected from source
+///    operands using the immediate-value operand. The multiplication is a
+///    carry-less multiplication, and the 128-bit integer product is stored in
+///    the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPCLMULQDQ instruction.
+///
+/// \param __X
+///    A 128-bit vector of [2 x i64] containing one of the source operands.
+/// \param __Y
+///    A 128-bit vector of [2 x i64] containing one of the source operands.
+/// \param __I
+///    An immediate value specifying which 64-bit values to select from the
+///    operands.
+///    Bit 0 is used to select a value from operand __X,
+///    and bit 4 is used to select a value from operand __Y:
+///    Bit[0]=0 indicates that bits[63:0] of operand __X are used.
+///    Bit[0]=1 indicates that bits[127:64] of operand __X are used.
+///    Bit[4]=0 indicates that bits[63:0] of operand __Y are used.
+///    Bit[4]=1 indicates that bits[127:64] of operand __Y are used.
+/// \returns The 128-bit integer vector containing the result of the carry-less
+///    multiplication of the selected 64-bit values.
 #define _mm_clmulepi64_si128(__X, __Y, __I) \
   ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
                                         (__v2di)(__m128i)(__Y), (char)(__I)))
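
The immediate encoding described above (bit 0 selects the half of __X, bit 4 the half of __Y) means the four selector values 0x00, 0x01, 0x10, and 0x11 yield the four 64x64-bit partial products of a full 128x128-bit carry-less multiply. A minimal sketch; build with something like `clang -mpclmul`:

    #include <wmmintrin.h>

    static void clmul_partial_products(__m128i x, __m128i y, __m128i out[4]) {
      out[0] = _mm_clmulepi64_si128(x, y, 0x00);  // low(x)  * low(y)
      out[1] = _mm_clmulepi64_si128(x, y, 0x01);  // high(x) * low(y)
      out[2] = _mm_clmulepi64_si128(x, y, 0x10);  // low(x)  * high(y)
      out[3] = _mm_clmulepi64_si128(x, y, 0x11);  // high(x) * high(y)
    }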
diff --git a/renderscript/clang-include/altivec.h b/renderscript/clang-include/altivec.h
index c39156e..74a1914 100644
--- a/renderscript/clang-include/altivec.h
+++ b/renderscript/clang-include/altivec.h
@@ -27,7 +27,7 @@
 #error "AltiVec support not enabled"
 #endif
 
-/* constants for mapping CR6 bits to predicate result. */
+/* Constants for mapping CR6 bits to predicate result. */
 
 #define __CR6_EQ 0
 #define __CR6_EQ_REV 1
@@ -36,67 +36,65 @@
 
 #define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
 
-static vector signed char __ATTRS_o_ai vec_perm(vector signed char __a,
-                                                vector signed char __b,
-                                                vector unsigned char __c);
+static __inline__ vector signed char __ATTRS_o_ai vec_perm(
+    vector signed char __a, vector signed char __b, vector unsigned char __c);
 
-static vector unsigned char __ATTRS_o_ai vec_perm(vector unsigned char __a,
-                                                  vector unsigned char __b,
-                                                  vector unsigned char __c);
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_perm(vector unsigned char __a, vector unsigned char __b,
+         vector unsigned char __c);
 
-static vector bool char __ATTRS_o_ai vec_perm(vector bool char __a,
-                                              vector bool char __b,
-                                              vector unsigned char __c);
+static __inline__ vector bool char __ATTRS_o_ai
+vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c);
 
-static vector short __ATTRS_o_ai vec_perm(vector signed short __a,
-                                          vector signed short __b,
-                                          vector unsigned char __c);
+static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
+                                                     vector signed short __b,
+                                                     vector unsigned char __c);
 
-static vector unsigned short __ATTRS_o_ai vec_perm(vector unsigned short __a,
-                                                   vector unsigned short __b,
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_perm(vector unsigned short __a, vector unsigned short __b,
+         vector unsigned char __c);
+
+static __inline__ vector bool short __ATTRS_o_ai vec_perm(
+    vector bool short __a, vector bool short __b, vector unsigned char __c);
+
+static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
+                                                     vector pixel __b,
+                                                     vector unsigned char __c);
+
+static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
+                                                   vector signed int __b,
                                                    vector unsigned char __c);
 
-static vector bool short __ATTRS_o_ai vec_perm(vector bool short __a,
-                                               vector bool short __b,
-                                               vector unsigned char __c);
+static __inline__ vector unsigned int __ATTRS_o_ai vec_perm(
+    vector unsigned int __a, vector unsigned int __b, vector unsigned char __c);
 
-static vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, vector pixel __b,
-                                          vector unsigned char __c);
+static __inline__ vector bool int __ATTRS_o_ai
+vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c);
 
-static vector int __ATTRS_o_ai vec_perm(vector signed int __a,
-                                        vector signed int __b,
-                                        vector unsigned char __c);
-
-static vector unsigned int __ATTRS_o_ai vec_perm(vector unsigned int __a,
-                                                 vector unsigned int __b,
-                                                 vector unsigned char __c);
-
-static vector bool int __ATTRS_o_ai vec_perm(vector bool int __a,
-                                             vector bool int __b,
-                                             vector unsigned char __c);
-
-static vector float __ATTRS_o_ai vec_perm(vector float __a, vector float __b,
-                                          vector unsigned char __c);
+static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
+                                                     vector float __b,
+                                                     vector unsigned char __c);
 
 #ifdef __VSX__
-static vector long long __ATTRS_o_ai vec_perm(vector signed long long __a,
-                                              vector signed long long __b,
-                                              vector unsigned char __c);
+static __inline__ vector long long __ATTRS_o_ai
+vec_perm(vector signed long long __a, vector signed long long __b,
+         vector unsigned char __c);
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_perm(vector unsigned long long __a, vector unsigned long long __b,
          vector unsigned char __c);
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_perm(vector bool long long __a, vector bool long long __b,
          vector unsigned char __c);
 
-static vector double __ATTRS_o_ai vec_perm(vector double __a, vector double __b,
-                                           vector unsigned char __c);
+static __inline__ vector double __ATTRS_o_ai vec_perm(vector double __a,
+                                                      vector double __b,
+                                                      vector unsigned char __c);
 #endif
 
-static vector unsigned char __ATTRS_o_ai vec_xor(vector unsigned char __a,
-                                                 vector unsigned char __b);
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char __a, vector unsigned char __b);
 
 /* vec_abs */
 
@@ -104,36 +102,41 @@
 #define __builtin_altivec_abs_v8hi vec_abs
 #define __builtin_altivec_abs_v4si vec_abs
 
-static vector signed char __ATTRS_o_ai vec_abs(vector signed char __a) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_abs(vector signed char __a) {
   return __builtin_altivec_vmaxsb(__a, -__a);
 }
 
-static vector signed short __ATTRS_o_ai vec_abs(vector signed short __a) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_abs(vector signed short __a) {
   return __builtin_altivec_vmaxsh(__a, -__a);
 }
 
-static vector signed int __ATTRS_o_ai vec_abs(vector signed int __a) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_abs(vector signed int __a) {
   return __builtin_altivec_vmaxsw(__a, -__a);
 }
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_abs(vector signed long long __a) {
   return __builtin_altivec_vmaxsd(__a, -__a);
 }
 #endif
 
-static vector float __ATTRS_o_ai vec_abs(vector float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_abs(vector float __a) {
+#ifdef __VSX__
+  return __builtin_vsx_xvabssp(__a);
+#else
   vector unsigned int __res =
       (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF);
   return (vector float)__res;
+#endif
 }
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector double __ATTRS_o_ai vec_abs(vector double __a) {
-  vector unsigned long long __res = { 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF };
-  __res &= (vector unsigned int)__a;
-  return (vector double)__res;
+static __inline__ vector double __ATTRS_o_ai vec_abs(vector double __a) {
+  return __builtin_vsx_xvabsdp(__a);
 }
 #endif
 
@@ -142,138 +145,146 @@
 #define __builtin_altivec_abss_v8hi vec_abss
 #define __builtin_altivec_abss_v4si vec_abss
 
-static vector signed char __ATTRS_o_ai vec_abss(vector signed char __a) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_abss(vector signed char __a) {
   return __builtin_altivec_vmaxsb(
       __a, __builtin_altivec_vsubsbs((vector signed char)(0), __a));
 }
 
-static vector signed short __ATTRS_o_ai vec_abss(vector signed short __a) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_abss(vector signed short __a) {
   return __builtin_altivec_vmaxsh(
       __a, __builtin_altivec_vsubshs((vector signed short)(0), __a));
 }
 
-static vector signed int __ATTRS_o_ai vec_abss(vector signed int __a) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_abss(vector signed int __a) {
   return __builtin_altivec_vmaxsw(
       __a, __builtin_altivec_vsubsws((vector signed int)(0), __a));
 }
 
 /* vec_add */
 
-static vector signed char __ATTRS_o_ai vec_add(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_add(vector signed char __a, vector signed char __b) {
   return __a + __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_add(vector bool char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_add(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a + __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_add(vector signed char __a,
-                                               vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_add(vector signed char __a, vector bool char __b) {
   return __a + (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_add(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_add(vector unsigned char __a, vector unsigned char __b) {
   return __a + __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_add(vector bool char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_add(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a + __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_add(vector unsigned char __a,
-                                                 vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_add(vector unsigned char __a, vector bool char __b) {
   return __a + (vector unsigned char)__b;
 }
 
-static vector short __ATTRS_o_ai vec_add(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
+                                                    vector short __b) {
   return __a + __b;
 }
 
-static vector short __ATTRS_o_ai vec_add(vector bool short __a,
-                                         vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_add(vector bool short __a,
+                                                    vector short __b) {
   return (vector short)__a + __b;
 }
 
-static vector short __ATTRS_o_ai vec_add(vector short __a,
-                                         vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
+                                                    vector bool short __b) {
   return __a + (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_add(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_add(vector unsigned short __a, vector unsigned short __b) {
   return __a + __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_add(vector bool short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_add(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a + __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_add(vector unsigned short __a,
-                                                  vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_add(vector unsigned short __a, vector bool short __b) {
   return __a + (vector unsigned short)__b;
 }
 
-static vector int __ATTRS_o_ai vec_add(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
+                                                  vector int __b) {
   return __a + __b;
 }
 
-static vector int __ATTRS_o_ai vec_add(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_add(vector bool int __a,
+                                                  vector int __b) {
   return (vector int)__a + __b;
 }
 
-static vector int __ATTRS_o_ai vec_add(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
+                                                  vector bool int __b) {
   return __a + (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_add(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_add(vector unsigned int __a, vector unsigned int __b) {
   return __a + __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_add(vector bool int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_add(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a + __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_add(vector unsigned int __a,
-                                                vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_add(vector unsigned int __a, vector bool int __b) {
   return __a + (vector unsigned int)__b;
 }
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_add(vector signed long long __a, vector signed long long __b) {
   return __a + __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_add(vector unsigned long long __a, vector unsigned long long __b) {
   return __a + __b;
 }
 
-static vector signed __int128 __ATTRS_o_ai vec_add(vector signed __int128 __a,
-                                                   vector signed __int128 __b) {
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_add(vector signed __int128 __a, vector signed __int128 __b) {
   return __a + __b;
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
   return __a + __b;
 }
 #endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
 
-static vector float __ATTRS_o_ai vec_add(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_add(vector float __a,
+                                                    vector float __b) {
   return __a + __b;
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai
-vec_add(vector double __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a,
+                                                     vector double __b) {
   return __a + __b;
 }
 #endif // __VSX__
@@ -281,13 +292,13 @@
 /* vec_adde */
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_adde(vector signed __int128 __a, vector signed __int128 __b,
          vector signed __int128 __c) {
   return __builtin_altivec_vaddeuqm(__a, __b, __c);
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
          vector unsigned __int128 __c) {
   return __builtin_altivec_vaddeuqm(__a, __b, __c);
@@ -297,13 +308,13 @@
 /* vec_addec */
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_addec(vector signed __int128 __a, vector signed __int128 __b,
           vector signed __int128 __c) {
   return __builtin_altivec_vaddecuq(__a, __b, __c);
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
           vector unsigned __int128 __c) {
   return __builtin_altivec_vaddecuq(__a, __b, __c);
@@ -314,33 +325,33 @@
 
 #define __builtin_altivec_vaddubm vec_vaddubm
 
-static vector signed char __ATTRS_o_ai vec_vaddubm(vector signed char __a,
-                                                   vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddubm(vector signed char __a, vector signed char __b) {
   return __a + __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vaddubm(vector bool char __a,
-                                                   vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddubm(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a + __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vaddubm(vector signed char __a,
-                                                   vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddubm(vector signed char __a, vector bool char __b) {
   return __a + (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vaddubm(vector unsigned char __a,
-                                                     vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubm(vector unsigned char __a, vector unsigned char __b) {
   return __a + __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vaddubm(vector bool char __a,
-                                                     vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubm(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a + __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vaddubm(vector unsigned char __a,
-                                                     vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubm(vector unsigned char __a, vector bool char __b) {
   return __a + (vector unsigned char)__b;
 }
 
@@ -348,33 +359,33 @@
 
 #define __builtin_altivec_vadduhm vec_vadduhm
 
-static vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
-                                             vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
+                                                        vector short __b) {
   return __a + __b;
 }
 
-static vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a,
-                                             vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a,
+                                                        vector short __b) {
   return (vector short)__a + __b;
 }
 
-static vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
-                                             vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
+                                                        vector bool short __b) {
   return __a + (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vadduhm(vector unsigned short __a, vector unsigned short __b) {
   return __a + __b;
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vadduhm(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a + __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vadduhm(vector unsigned short __a,
-                                                      vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector unsigned short __a, vector bool short __b) {
   return __a + (vector unsigned short)__b;
 }
 
@@ -382,32 +393,33 @@
 
 #define __builtin_altivec_vadduwm vec_vadduwm
 
-static vector int __ATTRS_o_ai vec_vadduwm(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
+                                                      vector int __b) {
   return __a + __b;
 }
 
-static vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
-                                           vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
+                                                      vector int __b) {
   return (vector int)__a + __b;
 }
 
-static vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
-                                           vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
+                                                      vector bool int __b) {
   return __a + (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vadduwm(vector unsigned int __a,
-                                                    vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduwm(vector unsigned int __a, vector unsigned int __b) {
   return __a + __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
-                                                    vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduwm(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a + __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vadduwm(vector unsigned int __a,
-                                                    vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduwm(vector unsigned int __a, vector bool int __b) {
   return __a + (vector unsigned int)__b;
 }
 
@@ -415,33 +427,32 @@
 
 #define __builtin_altivec_vaddfp vec_vaddfp
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vaddfp(vector float __a, vector float __b) {
   return __a + __b;
 }
 
 /* vec_addc */
 
-static vector signed int __ATTRS_o_ai vec_addc(vector signed int __a,
-                                               vector signed int __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_addc(vector signed int __a, vector signed int __b) {
   return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a,
                                                       (vector unsigned int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_addc(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_addc(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vaddcuw(__a, __b);
 }
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
   return (vector signed __int128)__builtin_altivec_vaddcuq(
-    (vector unsigned __int128)__a,
-    (vector unsigned __int128)__b);
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
   return __builtin_altivec_vaddcuq(__a, __b);
 }
@@ -449,222 +460,227 @@
 
 /* vec_vaddcuw */
 
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vaddcuw(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vaddcuw(__a, __b);
 }
 
 /* vec_adds */
 
-static vector signed char __ATTRS_o_ai vec_adds(vector signed char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_adds(vector signed char __a, vector signed char __b) {
   return __builtin_altivec_vaddsbs(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_adds(vector bool char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_adds(vector bool char __a, vector signed char __b) {
   return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_adds(vector signed char __a,
-                                                vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_adds(vector signed char __a, vector bool char __b) {
   return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_adds(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_adds(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vaddubs(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_adds(vector bool char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_adds(vector bool char __a, vector unsigned char __b) {
   return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_adds(vector unsigned char __a,
-                                                  vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_adds(vector unsigned char __a, vector bool char __b) {
   return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
 }
 
-static vector short __ATTRS_o_ai vec_adds(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
+                                                     vector short __b) {
   return __builtin_altivec_vaddshs(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_adds(vector bool short __a,
-                                          vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_adds(vector bool short __a,
+                                                     vector short __b) {
   return __builtin_altivec_vaddshs((vector short)__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_adds(vector short __a,
-                                          vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
+                                                     vector bool short __b) {
   return __builtin_altivec_vaddshs(__a, (vector short)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_adds(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_adds(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_vadduhs(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_adds(vector bool short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_adds(vector bool short __a, vector unsigned short __b) {
   return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_adds(vector unsigned short __a,
-                                                   vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_adds(vector unsigned short __a, vector bool short __b) {
   return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
 }
 
-static vector int __ATTRS_o_ai vec_adds(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
+                                                   vector int __b) {
   return __builtin_altivec_vaddsws(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_adds(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_adds(vector bool int __a,
+                                                   vector int __b) {
   return __builtin_altivec_vaddsws((vector int)__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_adds(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
+                                                   vector bool int __b) {
   return __builtin_altivec_vaddsws(__a, (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_adds(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_adds(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vadduws(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_adds(vector bool int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_adds(vector bool int __a, vector unsigned int __b) {
   return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_adds(vector unsigned int __a,
-                                                 vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_adds(vector unsigned int __a, vector bool int __b) {
   return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
 }
 
 /* vec_vaddsbs */
 
-static vector signed char __ATTRS_o_ai vec_vaddsbs(vector signed char __a,
-                                                   vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddsbs(vector signed char __a, vector signed char __b) {
   return __builtin_altivec_vaddsbs(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vaddsbs(vector bool char __a,
-                                                   vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddsbs(vector bool char __a, vector signed char __b) {
   return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vaddsbs(vector signed char __a,
-                                                   vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddsbs(vector signed char __a, vector bool char __b) {
   return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
 }
 
 /* vec_vaddubs */
 
-static vector unsigned char __ATTRS_o_ai vec_vaddubs(vector unsigned char __a,
-                                                     vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubs(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vaddubs(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vaddubs(vector bool char __a,
-                                                     vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubs(vector bool char __a, vector unsigned char __b) {
   return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vaddubs(vector unsigned char __a,
-                                                     vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubs(vector unsigned char __a, vector bool char __b) {
   return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
 }
 
 /* vec_vaddshs */
 
-static vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
-                                             vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
+                                                        vector short __b) {
   return __builtin_altivec_vaddshs(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a,
-                                             vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a,
+                                                        vector short __b) {
   return __builtin_altivec_vaddshs((vector short)__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
-                                             vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
+                                                        vector bool short __b) {
   return __builtin_altivec_vaddshs(__a, (vector short)__b);
 }
 
 /* vec_vadduhs */
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vadduhs(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_vadduhs(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vadduhs(vector bool short __a, vector unsigned short __b) {
   return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vadduhs(vector unsigned short __a,
-                                                      vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector unsigned short __a, vector bool short __b) {
   return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
 }
 
 /* vec_vaddsws */
 
-static vector int __ATTRS_o_ai vec_vaddsws(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
+                                                      vector int __b) {
   return __builtin_altivec_vaddsws(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a,
-                                           vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a,
+                                                      vector int __b) {
   return __builtin_altivec_vaddsws((vector int)__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
-                                           vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
+                                                      vector bool int __b) {
   return __builtin_altivec_vaddsws(__a, (vector int)__b);
 }
 
 /* vec_vadduws */
 
-static vector unsigned int __ATTRS_o_ai vec_vadduws(vector unsigned int __a,
-                                                    vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduws(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vadduws(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vadduws(vector bool int __a,
-                                                    vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduws(vector bool int __a, vector unsigned int __b) {
   return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vadduws(vector unsigned int __a,
-                                                    vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduws(vector unsigned int __a, vector bool int __b) {
   return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
 }
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
 /* vec_vadduqm */
 
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) {
   return __a + __b;
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
   return __a + __b;
 }
 
 /* vec_vaddeuqm */
 
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
   return __builtin_altivec_vaddeuqm(__a, __b, __c);
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
              vector unsigned __int128 __c) {
   return __builtin_altivec_vaddeuqm(__a, __b, __c);
@@ -672,25 +688,25 @@
 
 /* vec_vaddcuq */
 
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
   return __builtin_altivec_vaddcuq(__a, __b);
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
   return __builtin_altivec_vaddcuq(__a, __b);
 }
 
 /* vec_vaddecuq */
 
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
   return __builtin_altivec_vaddecuq(__a, __b, __c);
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
              vector unsigned __int128 __c) {
   return __builtin_altivec_vaddecuq(__a, __b, __c);
@@ -701,338 +717,351 @@
 
 #define __builtin_altivec_vand vec_and
 
-static vector signed char __ATTRS_o_ai vec_and(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_and(vector signed char __a, vector signed char __b) {
   return __a & __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_and(vector bool char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_and(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a & __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_and(vector signed char __a,
-                                               vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_and(vector signed char __a, vector bool char __b) {
   return __a & (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_and(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_and(vector unsigned char __a, vector unsigned char __b) {
   return __a & __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_and(vector bool char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_and(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a & __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_and(vector unsigned char __a,
-                                                 vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_and(vector unsigned char __a, vector bool char __b) {
   return __a & (vector unsigned char)__b;
 }
 
-static vector bool char __ATTRS_o_ai vec_and(vector bool char __a,
-                                             vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_and(vector bool char __a,
+                                                        vector bool char __b) {
   return __a & __b;
 }
 
-static vector short __ATTRS_o_ai vec_and(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
+                                                    vector short __b) {
   return __a & __b;
 }
 
-static vector short __ATTRS_o_ai vec_and(vector bool short __a,
-                                         vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_and(vector bool short __a,
+                                                    vector short __b) {
   return (vector short)__a & __b;
 }
 
-static vector short __ATTRS_o_ai vec_and(vector short __a,
-                                         vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
+                                                    vector bool short __b) {
   return __a & (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_and(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_and(vector unsigned short __a, vector unsigned short __b) {
   return __a & __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_and(vector bool short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_and(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a & __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_and(vector unsigned short __a,
-                                                  vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_and(vector unsigned short __a, vector bool short __b) {
   return __a & (vector unsigned short)__b;
 }
 
-static vector bool short __ATTRS_o_ai vec_and(vector bool short __a,
-                                              vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_and(vector bool short __a, vector bool short __b) {
   return __a & __b;
 }
 
-static vector int __ATTRS_o_ai vec_and(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
+                                                  vector int __b) {
   return __a & __b;
 }
 
-static vector int __ATTRS_o_ai vec_and(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_and(vector bool int __a,
+                                                  vector int __b) {
   return (vector int)__a & __b;
 }
 
-static vector int __ATTRS_o_ai vec_and(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
+                                                  vector bool int __b) {
   return __a & (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_and(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_and(vector unsigned int __a, vector unsigned int __b) {
   return __a & __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_and(vector bool int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_and(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a & __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_and(vector unsigned int __a,
-                                                vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_and(vector unsigned int __a, vector bool int __b) {
   return __a & (vector unsigned int)__b;
 }
 
-static vector bool int __ATTRS_o_ai vec_and(vector bool int __a,
-                                            vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_and(vector bool int __a,
+                                                       vector bool int __b) {
   return __a & __b;
 }
 
-static vector float __ATTRS_o_ai vec_and(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
+                                                    vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_and(vector bool int __a,
-                                         vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_and(vector bool int __a,
+                                                    vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_and(vector float __a,
-                                         vector bool int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
+                                                    vector bool int __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & (vector unsigned int)__b;
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_and(vector bool long long __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_and(vector bool long long __a,
+                                                     vector double __b) {
   vector unsigned long long __res =
       (vector unsigned long long)__a & (vector unsigned long long)__b;
   return (vector double)__res;
 }
 
-static vector double __ATTRS_o_ai vec_and(vector double __a, vector bool long long __b) {
+static __inline__ vector double __ATTRS_o_ai
+vec_and(vector double __a, vector bool long long __b) {
   vector unsigned long long __res =
       (vector unsigned long long)__a & (vector unsigned long long)__b;
   return (vector double)__res;
 }
 
-static vector double __ATTRS_o_ai vec_and(vector double __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_and(vector double __a,
+                                                     vector double __b) {
   vector unsigned long long __res =
       (vector unsigned long long)__a & (vector unsigned long long)__b;
   return (vector double)__res;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_and(vector signed long long __a, vector signed long long __b) {
   return __a & __b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_and(vector bool long long __a, vector signed long long __b) {
   return (vector signed long long)__a & __b;
 }
 
-static vector signed long long __ATTRS_o_ai vec_and(vector signed long long __a,
-                                                    vector bool long long __b) {
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_and(vector signed long long __a, vector bool long long __b) {
   return __a & (vector signed long long)__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_and(vector unsigned long long __a, vector unsigned long long __b) {
   return __a & __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_and(vector bool long long __a, vector unsigned long long __b) {
   return (vector unsigned long long)__a & __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_and(vector unsigned long long __a, vector bool long long __b) {
   return __a & (vector unsigned long long)__b;
 }
 
-static vector bool long long __ATTRS_o_ai vec_and(vector bool long long __a,
-                                                  vector bool long long __b) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_and(vector bool long long __a, vector bool long long __b) {
   return __a & __b;
 }
 #endif
 
 /* vec_vand */
 
-static vector signed char __ATTRS_o_ai vec_vand(vector signed char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vand(vector signed char __a, vector signed char __b) {
   return __a & __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vand(vector bool char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vand(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a & __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vand(vector signed char __a,
-                                                vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vand(vector signed char __a, vector bool char __b) {
   return __a & (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vand(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vand(vector unsigned char __a, vector unsigned char __b) {
   return __a & __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vand(vector bool char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vand(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a & __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vand(vector unsigned char __a,
-                                                  vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vand(vector unsigned char __a, vector bool char __b) {
   return __a & (vector unsigned char)__b;
 }
 
-static vector bool char __ATTRS_o_ai vec_vand(vector bool char __a,
-                                              vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_vand(vector bool char __a,
+                                                         vector bool char __b) {
   return __a & __b;
 }
 
-static vector short __ATTRS_o_ai vec_vand(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
+                                                     vector short __b) {
   return __a & __b;
 }
 
-static vector short __ATTRS_o_ai vec_vand(vector bool short __a,
-                                          vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vand(vector bool short __a,
+                                                     vector short __b) {
   return (vector short)__a & __b;
 }
 
-static vector short __ATTRS_o_ai vec_vand(vector short __a,
-                                          vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
+                                                     vector bool short __b) {
   return __a & (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vand(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vand(vector unsigned short __a, vector unsigned short __b) {
   return __a & __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vand(vector bool short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vand(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a & __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vand(vector unsigned short __a,
-                                                   vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vand(vector unsigned short __a, vector bool short __b) {
   return __a & (vector unsigned short)__b;
 }
 
-static vector bool short __ATTRS_o_ai vec_vand(vector bool short __a,
-                                               vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vand(vector bool short __a, vector bool short __b) {
   return __a & __b;
 }
 
-static vector int __ATTRS_o_ai vec_vand(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
+                                                   vector int __b) {
   return __a & __b;
 }
 
-static vector int __ATTRS_o_ai vec_vand(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vand(vector bool int __a,
+                                                   vector int __b) {
   return (vector int)__a & __b;
 }
 
-static vector int __ATTRS_o_ai vec_vand(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
+                                                   vector bool int __b) {
   return __a & (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vand(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vand(vector unsigned int __a, vector unsigned int __b) {
   return __a & __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vand(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a & __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vand(vector unsigned int __a,
-                                                 vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vand(vector unsigned int __a, vector bool int __b) {
   return __a & (vector unsigned int)__b;
 }
 
-static vector bool int __ATTRS_o_ai vec_vand(vector bool int __a,
-                                             vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_vand(vector bool int __a,
+                                                        vector bool int __b) {
   return __a & __b;
 }
 
-static vector float __ATTRS_o_ai vec_vand(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
+                                                     vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_vand(vector bool int __a,
-                                          vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vand(vector bool int __a,
+                                                     vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_vand(vector float __a,
-                                          vector bool int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
+                                                     vector bool int __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & (vector unsigned int)__b;
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vand(vector signed long long __a, vector signed long long __b) {
   return __a & __b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vand(vector bool long long __a, vector signed long long __b) {
   return (vector signed long long)__a & __b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vand(vector signed long long __a, vector bool long long __b) {
   return __a & (vector signed long long)__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vand(vector unsigned long long __a, vector unsigned long long __b) {
   return __a & __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vand(vector bool long long __a, vector unsigned long long __b) {
   return (vector unsigned long long)__a & __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vand(vector unsigned long long __a, vector bool long long __b) {
   return __a & (vector unsigned long long)__b;
 }
 
-static vector bool long long __ATTRS_o_ai vec_vand(vector bool long long __a,
-                                                   vector bool long long __b) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vand(vector bool long long __a, vector bool long long __b) {
   return __a & __b;
 }
 #endif
@@ -1041,419 +1070,432 @@
 
 #define __builtin_altivec_vandc vec_andc
 
-static vector signed char __ATTRS_o_ai vec_andc(vector signed char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_andc(vector signed char __a, vector signed char __b) {
   return __a & ~__b;
 }
 
-static vector signed char __ATTRS_o_ai vec_andc(vector bool char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_andc(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a & ~__b;
 }
 
-static vector signed char __ATTRS_o_ai vec_andc(vector signed char __a,
-                                                vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_andc(vector signed char __a, vector bool char __b) {
   return __a & ~(vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_andc(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_andc(vector unsigned char __a, vector unsigned char __b) {
   return __a & ~__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_andc(vector bool char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_andc(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a & ~__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_andc(vector unsigned char __a,
-                                                  vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_andc(vector unsigned char __a, vector bool char __b) {
   return __a & ~(vector unsigned char)__b;
 }
 
-static vector bool char __ATTRS_o_ai vec_andc(vector bool char __a,
-                                              vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_andc(vector bool char __a,
+                                                         vector bool char __b) {
   return __a & ~__b;
 }
 
-static vector short __ATTRS_o_ai vec_andc(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
+                                                     vector short __b) {
   return __a & ~__b;
 }
 
-static vector short __ATTRS_o_ai vec_andc(vector bool short __a,
-                                          vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_andc(vector bool short __a,
+                                                     vector short __b) {
   return (vector short)__a & ~__b;
 }
 
-static vector short __ATTRS_o_ai vec_andc(vector short __a,
-                                          vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
+                                                     vector bool short __b) {
   return __a & ~(vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_andc(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_andc(vector unsigned short __a, vector unsigned short __b) {
   return __a & ~__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_andc(vector bool short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_andc(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a & ~__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_andc(vector unsigned short __a,
-                                                   vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_andc(vector unsigned short __a, vector bool short __b) {
   return __a & ~(vector unsigned short)__b;
 }
 
-static vector bool short __ATTRS_o_ai vec_andc(vector bool short __a,
-                                               vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_andc(vector bool short __a, vector bool short __b) {
   return __a & ~__b;
 }
 
-static vector int __ATTRS_o_ai vec_andc(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
+                                                   vector int __b) {
   return __a & ~__b;
 }
 
-static vector int __ATTRS_o_ai vec_andc(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_andc(vector bool int __a,
+                                                   vector int __b) {
   return (vector int)__a & ~__b;
 }
 
-static vector int __ATTRS_o_ai vec_andc(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
+                                                   vector bool int __b) {
   return __a & ~(vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_andc(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_andc(vector unsigned int __a, vector unsigned int __b) {
   return __a & ~__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_andc(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a & ~__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_andc(vector unsigned int __a,
-                                                 vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_andc(vector unsigned int __a, vector bool int __b) {
   return __a & ~(vector unsigned int)__b;
 }
 
-static vector bool int __ATTRS_o_ai vec_andc(vector bool int __a,
-                                             vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_andc(vector bool int __a,
+                                                        vector bool int __b) {
   return __a & ~__b;
 }
 
-static vector float __ATTRS_o_ai vec_andc(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
+                                                     vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & ~(vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_andc(vector bool int __a,
-                                          vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_andc(vector bool int __a,
+                                                     vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & ~(vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_andc(vector float __a,
-                                          vector bool int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
+                                                     vector bool int __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & ~(vector unsigned int)__b;
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_andc(vector bool long long __a,
+                                                      vector double __b) {
   vector unsigned long long __res =
       (vector unsigned long long)__a & ~(vector unsigned long long)__b;
   return (vector double)__res;
 }
 
-static vector double __ATTRS_o_ai
+static __inline__ vector double __ATTRS_o_ai
 vec_andc(vector double __a, vector bool long long __b) {
   vector unsigned long long __res =
       (vector unsigned long long)__a & ~(vector unsigned long long)__b;
   return (vector double)__res;
 }
 
-static vector double __ATTRS_o_ai vec_andc(vector double __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_andc(vector double __a,
+                                                      vector double __b) {
   vector unsigned long long __res =
       (vector unsigned long long)__a & ~(vector unsigned long long)__b;
   return (vector double)__res;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_andc(vector signed long long __a, vector signed long long __b) {
   return __a & ~__b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_andc(vector bool long long __a, vector signed long long __b) {
   return (vector signed long long)__a & ~__b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_andc(vector signed long long __a, vector bool long long __b) {
   return __a & ~(vector signed long long)__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
   return __a & ~__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_andc(vector bool long long __a, vector unsigned long long __b) {
   return (vector unsigned long long)__a & ~__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_andc(vector unsigned long long __a, vector bool long long __b) {
   return __a & ~(vector unsigned long long)__b;
 }
 
-static vector bool long long __ATTRS_o_ai vec_andc(vector bool long long __a,
-                                                   vector bool long long __b) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector bool long long __b) {
   return __a & ~__b;
 }
 #endif
 
 /* vec_vandc */
 
-static vector signed char __ATTRS_o_ai vec_vandc(vector signed char __a,
-                                                 vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vandc(vector signed char __a, vector signed char __b) {
   return __a & ~__b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vandc(vector bool char __a,
-                                                 vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vandc(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a & ~__b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vandc(vector signed char __a,
-                                                 vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vandc(vector signed char __a, vector bool char __b) {
   return __a & ~(vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vandc(vector unsigned char __a,
-                                                   vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vandc(vector unsigned char __a, vector unsigned char __b) {
   return __a & ~__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vandc(vector bool char __a,
-                                                   vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vandc(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a & ~__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vandc(vector unsigned char __a,
-                                                   vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vandc(vector unsigned char __a, vector bool char __b) {
   return __a & ~(vector unsigned char)__b;
 }
 
-static vector bool char __ATTRS_o_ai vec_vandc(vector bool char __a,
-                                               vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vandc(vector bool char __a, vector bool char __b) {
   return __a & ~__b;
 }
 
-static vector short __ATTRS_o_ai vec_vandc(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
+                                                      vector short __b) {
   return __a & ~__b;
 }
 
-static vector short __ATTRS_o_ai vec_vandc(vector bool short __a,
-                                           vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vandc(vector bool short __a,
+                                                      vector short __b) {
   return (vector short)__a & ~__b;
 }
 
-static vector short __ATTRS_o_ai vec_vandc(vector short __a,
-                                           vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
+                                                      vector bool short __b) {
   return __a & ~(vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vandc(vector unsigned short __a,
-                                                    vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vandc(vector unsigned short __a, vector unsigned short __b) {
   return __a & ~__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vandc(vector bool short __a,
-                                                    vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vandc(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a & ~__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vandc(vector unsigned short __a,
-                                                    vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vandc(vector unsigned short __a, vector bool short __b) {
   return __a & ~(vector unsigned short)__b;
 }
 
-static vector bool short __ATTRS_o_ai vec_vandc(vector bool short __a,
-                                                vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vandc(vector bool short __a, vector bool short __b) {
   return __a & ~__b;
 }
 
-static vector int __ATTRS_o_ai vec_vandc(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
+                                                    vector int __b) {
   return __a & ~__b;
 }
 
-static vector int __ATTRS_o_ai vec_vandc(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vandc(vector bool int __a,
+                                                    vector int __b) {
   return (vector int)__a & ~__b;
 }
 
-static vector int __ATTRS_o_ai vec_vandc(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
+                                                    vector bool int __b) {
   return __a & ~(vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vandc(vector unsigned int __a,
-                                                  vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vandc(vector unsigned int __a, vector unsigned int __b) {
   return __a & ~__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                  vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vandc(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a & ~__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vandc(vector unsigned int __a,
-                                                  vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vandc(vector unsigned int __a, vector bool int __b) {
   return __a & ~(vector unsigned int)__b;
 }
 
-static vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                              vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a,
+                                                         vector bool int __b) {
   return __a & ~__b;
 }
 
-static vector float __ATTRS_o_ai vec_vandc(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
+                                                      vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & ~(vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                           vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vandc(vector bool int __a,
+                                                      vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & ~(vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_vandc(vector float __a,
-                                           vector bool int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
+                                                      vector bool int __b) {
   vector unsigned int __res =
       (vector unsigned int)__a & ~(vector unsigned int)__b;
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vandc(vector signed long long __a, vector signed long long __b) {
   return __a & ~__b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vandc(vector bool long long __a, vector signed long long __b) {
   return (vector signed long long)__a & ~__b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vandc(vector signed long long __a, vector bool long long __b) {
   return __a & ~(vector signed long long)__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vandc(vector unsigned long long __a, vector unsigned long long __b) {
   return __a & ~__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vandc(vector bool long long __a, vector unsigned long long __b) {
   return (vector unsigned long long)__a & ~__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vandc(vector unsigned long long __a, vector bool long long __b) {
   return __a & ~(vector unsigned long long)__b;
 }
 
-static vector bool long long __ATTRS_o_ai vec_vandc(vector bool long long __a,
-                                                    vector bool long long __b) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vandc(vector bool long long __a, vector bool long long __b) {
   return __a & ~__b;
 }
 #endif
 
 /* vec_avg */
 
-static vector signed char __ATTRS_o_ai vec_avg(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_avg(vector signed char __a, vector signed char __b) {
   return __builtin_altivec_vavgsb(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_avg(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_avg(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vavgub(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_avg(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_avg(vector short __a,
+                                                    vector short __b) {
   return __builtin_altivec_vavgsh(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_avg(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_avg(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_vavguh(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_avg(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_avg(vector int __a,
+                                                  vector int __b) {
   return __builtin_altivec_vavgsw(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_avg(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_avg(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vavguw(__a, __b);
 }
 
 /* vec_vavgsb */
 
-static vector signed char __attribute__((__always_inline__))
+static __inline__ vector signed char __attribute__((__always_inline__))
 vec_vavgsb(vector signed char __a, vector signed char __b) {
   return __builtin_altivec_vavgsb(__a, __b);
 }
 
 /* vec_vavgub */
 
-static vector unsigned char __attribute__((__always_inline__))
+static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_vavgub(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vavgub(__a, __b);
 }
 
 /* vec_vavgsh */
 
-static vector short __attribute__((__always_inline__))
+static __inline__ vector short __attribute__((__always_inline__))
 vec_vavgsh(vector short __a, vector short __b) {
   return __builtin_altivec_vavgsh(__a, __b);
 }
 
 /* vec_vavguh */
 
-static vector unsigned short __attribute__((__always_inline__))
+static __inline__ vector unsigned short __attribute__((__always_inline__))
 vec_vavguh(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_vavguh(__a, __b);
 }
 
 /* vec_vavgsw */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_vavgsw(vector int __a, vector int __b) {
   return __builtin_altivec_vavgsw(__a, __b);
 }
 
 /* vec_vavguw */
 
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vavguw(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vavguw(__a, __b);
 }
 
 /* vec_ceil */
 
-static vector float __ATTRS_o_ai vec_ceil(vector float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_ceil(vector float __a) {
 #ifdef __VSX__
   return __builtin_vsx_xvrspip(__a);
 #else
@@ -1462,82 +1504,83 @@
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_ceil(vector double __a) {
+static __inline__ vector double __ATTRS_o_ai vec_ceil(vector double __a) {
   return __builtin_vsx_xvrdpip(__a);
 }
 #endif
 
 /* vec_vrfip */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vrfip(vector float __a) {
   return __builtin_altivec_vrfip(__a);
 }
 
 /* vec_cmpb */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_cmpb(vector float __a, vector float __b) {
   return __builtin_altivec_vcmpbfp(__a, __b);
 }
 
 /* vec_vcmpbfp */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_vcmpbfp(vector float __a, vector float __b) {
   return __builtin_altivec_vcmpbfp(__a, __b);
 }
 
 /* vec_cmpeq */
 
-static vector bool char __ATTRS_o_ai vec_cmpeq(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpeq(vector signed char __a, vector signed char __b) {
   return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
                                                       (vector char)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_cmpeq(vector unsigned char __a,
-                                               vector unsigned char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
   return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
                                                       (vector char)__b);
 }
 
-static vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a,
-                                                vector short __b) {
+static __inline__ vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a,
+                                                           vector short __b) {
   return (vector bool short)__builtin_altivec_vcmpequh(__a, __b);
 }
 
-static vector bool short __ATTRS_o_ai vec_cmpeq(vector unsigned short __a,
-                                                vector unsigned short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
   return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
                                                        (vector short)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a, vector int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a,
+                                                         vector int __b) {
   return (vector bool int)__builtin_altivec_vcmpequw(__a, __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_cmpeq(vector unsigned int __a,
-                                              vector unsigned int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
   return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
                                                      (vector int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmpeq(vector signed long long __a, vector signed long long __b) {
   return (vector bool long long)__builtin_altivec_vcmpequd(__a, __b);
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
   return (vector bool long long)__builtin_altivec_vcmpequd(
       (vector long long)__a, (vector long long)__b);
 }
 #endif
 
-static vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
-                                              vector float __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
+                                                         vector float __b) {
 #ifdef __VSX__
   return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b);
 #else
@@ -1546,58 +1589,58 @@
 }
 
 #ifdef __VSX__
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmpeq(vector double __a, vector double __b) {
   return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b);
 }
 #endif
 
-
 /* vec_cmpgt */
 
-static vector bool char __ATTRS_o_ai vec_cmpgt(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpgt(vector signed char __a, vector signed char __b) {
   return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
 }
 
-static vector bool char __ATTRS_o_ai vec_cmpgt(vector unsigned char __a,
-                                               vector unsigned char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
   return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
 }
 
-static vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a,
-                                                vector short __b) {
+static __inline__ vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a,
+                                                           vector short __b) {
   return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
 }
 
-static vector bool short __ATTRS_o_ai vec_cmpgt(vector unsigned short __a,
-                                                vector unsigned short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
   return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a, vector int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a,
+                                                         vector int __b) {
   return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_cmpgt(vector unsigned int __a,
-                                              vector unsigned int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
   return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
 }
 
 #ifdef __POWER8_VECTOR__
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmpgt(vector signed long long __a, vector signed long long __b) {
   return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b);
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
   return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b);
 }
 #endif
 
-static vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
-                                              vector float __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
+                                                         vector float __b) {
 #ifdef __VSX__
   return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b);
 #else
@@ -1606,7 +1649,7 @@
 }
 
 #ifdef __VSX__
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmpgt(vector double __a, vector double __b) {
   return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
 }
@@ -1614,38 +1657,38 @@
 
 /* vec_cmpge */
 
-static vector bool char __ATTRS_o_ai
-vec_cmpge (vector signed char __a, vector signed char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpge(vector signed char __a, vector signed char __b) {
   return ~(vec_cmpgt(__b, __a));
 }
 
-static vector bool char __ATTRS_o_ai
-vec_cmpge (vector unsigned char __a, vector unsigned char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
   return ~(vec_cmpgt(__b, __a));
 }
 
-static vector bool short __ATTRS_o_ai
-vec_cmpge (vector signed short __a, vector signed short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpge(vector signed short __a, vector signed short __b) {
   return ~(vec_cmpgt(__b, __a));
 }
 
-static vector bool short __ATTRS_o_ai
-vec_cmpge (vector unsigned short __a, vector unsigned short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
   return ~(vec_cmpgt(__b, __a));
 }
 
-static vector bool int __ATTRS_o_ai
-vec_cmpge (vector signed int __a, vector signed int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpge(vector signed int __a, vector signed int __b) {
   return ~(vec_cmpgt(__b, __a));
 }
 
-static vector bool int __ATTRS_o_ai
-vec_cmpge (vector unsigned int __a, vector unsigned int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
   return ~(vec_cmpgt(__b, __a));
 }
 
-static vector bool int __ATTRS_o_ai
-vec_cmpge(vector float __a, vector float __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpge(vector float __a,
+                                                         vector float __b) {
 #ifdef __VSX__
   return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
 #else
@@ -1654,19 +1697,19 @@
 }
 
 #ifdef __VSX__
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmpge(vector double __a, vector double __b) {
   return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
 }
 #endif
 
 #ifdef __POWER8_VECTOR__
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmpge(vector signed long long __a, vector signed long long __b) {
   return ~(vec_cmpgt(__b, __a));
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
   return ~(vec_cmpgt(__b, __a));
 }
@@ -1674,111 +1717,111 @@
 
 /* vec_vcmpgefp */
 
-static vector bool int __attribute__((__always_inline__))
+static __inline__ vector bool int __attribute__((__always_inline__))
 vec_vcmpgefp(vector float __a, vector float __b) {
   return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
 }
 
 /* vec_vcmpgtsb */
 
-static vector bool char __attribute__((__always_inline__))
+static __inline__ vector bool char __attribute__((__always_inline__))
 vec_vcmpgtsb(vector signed char __a, vector signed char __b) {
   return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
 }
 
 /* vec_vcmpgtub */
 
-static vector bool char __attribute__((__always_inline__))
+static __inline__ vector bool char __attribute__((__always_inline__))
 vec_vcmpgtub(vector unsigned char __a, vector unsigned char __b) {
   return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
 }
 
 /* vec_vcmpgtsh */
 
-static vector bool short __attribute__((__always_inline__))
+static __inline__ vector bool short __attribute__((__always_inline__))
 vec_vcmpgtsh(vector short __a, vector short __b) {
   return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
 }
 
 /* vec_vcmpgtuh */
 
-static vector bool short __attribute__((__always_inline__))
+static __inline__ vector bool short __attribute__((__always_inline__))
 vec_vcmpgtuh(vector unsigned short __a, vector unsigned short __b) {
   return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
 }
 
 /* vec_vcmpgtsw */
 
-static vector bool int __attribute__((__always_inline__))
+static __inline__ vector bool int __attribute__((__always_inline__))
 vec_vcmpgtsw(vector int __a, vector int __b) {
   return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
 }
 
 /* vec_vcmpgtuw */
 
-static vector bool int __attribute__((__always_inline__))
+static __inline__ vector bool int __attribute__((__always_inline__))
 vec_vcmpgtuw(vector unsigned int __a, vector unsigned int __b) {
   return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
 }
 
 /* vec_vcmpgtfp */
 
-static vector bool int __attribute__((__always_inline__))
+static __inline__ vector bool int __attribute__((__always_inline__))
 vec_vcmpgtfp(vector float __a, vector float __b) {
   return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
 }
 
 /* vec_cmple */
 
-static vector bool char __ATTRS_o_ai
-vec_cmple (vector signed char __a, vector signed char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmple(vector signed char __a, vector signed char __b) {
   return vec_cmpge(__b, __a);
 }
 
-static vector bool char __ATTRS_o_ai
-vec_cmple (vector unsigned char __a, vector unsigned char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmple(vector unsigned char __a, vector unsigned char __b) {
   return vec_cmpge(__b, __a);
 }
 
-static vector bool short __ATTRS_o_ai
-vec_cmple (vector signed short __a, vector signed short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmple(vector signed short __a, vector signed short __b) {
   return vec_cmpge(__b, __a);
 }
 
-static vector bool short __ATTRS_o_ai
-vec_cmple (vector unsigned short __a, vector unsigned short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmple(vector unsigned short __a, vector unsigned short __b) {
   return vec_cmpge(__b, __a);
 }
 
-static vector bool int __ATTRS_o_ai
-vec_cmple (vector signed int __a, vector signed int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmple(vector signed int __a, vector signed int __b) {
   return vec_cmpge(__b, __a);
 }
 
-static vector bool int __ATTRS_o_ai
-vec_cmple (vector unsigned int __a, vector unsigned int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmple(vector unsigned int __a, vector unsigned int __b) {
   return vec_cmpge(__b, __a);
 }
 
-static vector bool int __ATTRS_o_ai
-vec_cmple(vector float __a, vector float __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_cmple(vector float __a,
+                                                         vector float __b) {
   return vec_cmpge(__b, __a);
 }
 
 #ifdef __VSX__
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmple(vector double __a, vector double __b) {
   return vec_cmpge(__b, __a);
 }
 #endif
 
 #ifdef __POWER8_VECTOR__
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmple(vector signed long long __a, vector signed long long __b) {
   return vec_cmpge(__b, __a);
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
   return vec_cmpge(__b, __a);
 }
@@ -1786,83 +1829,90 @@
 
 /* vec_cmplt */
 
-static vector bool char __ATTRS_o_ai vec_cmplt(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmplt(vector signed char __a, vector signed char __b) {
   return vec_cmpgt(__b, __a);
 }
 
-static vector bool char __ATTRS_o_ai vec_cmplt(vector unsigned char __a,
-                                               vector unsigned char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
   return vec_cmpgt(__b, __a);
 }
 
-static vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
-                                                vector short __b) {
+static __inline__ vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
+                                                           vector short __b) {
   return vec_cmpgt(__b, __a);
 }
 
-static vector bool short __ATTRS_o_ai vec_cmplt(vector unsigned short __a,
-                                                vector unsigned short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
   return vec_cmpgt(__b, __a);
 }
 
-static vector bool int __ATTRS_o_ai vec_cmplt(vector int __a, vector int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector int __a,
+                                                         vector int __b) {
   return vec_cmpgt(__b, __a);
 }
 
-static vector bool int __ATTRS_o_ai vec_cmplt(vector unsigned int __a,
-                                              vector unsigned int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
   return vec_cmpgt(__b, __a);
 }
 
-static vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
-                                              vector float __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
+                                                         vector float __b) {
   return vec_cmpgt(__b, __a);
 }
 
 #ifdef __VSX__
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmplt(vector double __a, vector double __b) {
   return vec_cmpgt(__b, __a);
 }
 #endif
 
 #ifdef __POWER8_VECTOR__
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmplt(vector signed long long __a, vector signed long long __b) {
   return vec_cmpgt(__b, __a);
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
   return vec_cmpgt(__b, __a);
 }
 
 /* vec_cntlz */
 
-static vector signed char __ATTRS_o_ai vec_cntlz(vector signed char __a) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_cntlz(vector signed char __a) {
   return __builtin_altivec_vclzb(__a);
 }
-static vector unsigned char __ATTRS_o_ai vec_cntlz(vector unsigned char __a) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_cntlz(vector unsigned char __a) {
   return __builtin_altivec_vclzb(__a);
 }
-static vector signed short __ATTRS_o_ai vec_cntlz(vector signed short __a) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_cntlz(vector signed short __a) {
   return __builtin_altivec_vclzh(__a);
 }
-static vector unsigned short __ATTRS_o_ai vec_cntlz(vector unsigned short __a) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_cntlz(vector unsigned short __a) {
   return __builtin_altivec_vclzh(__a);
 }
-static vector signed int __ATTRS_o_ai vec_cntlz(vector signed int __a) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_cntlz(vector signed int __a) {
   return __builtin_altivec_vclzw(__a);
 }
-static vector unsigned int __ATTRS_o_ai vec_cntlz(vector unsigned int __a) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_cntlz(vector unsigned int __a) {
   return __builtin_altivec_vclzw(__a);
 }
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_cntlz(vector signed long long __a) {
   return __builtin_altivec_vclzd(__a);
 }
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_cntlz(vector unsigned long long __a) {
   return __builtin_altivec_vclzd(__a);
 }
@@ -1871,64 +1921,97 @@
 /* vec_cpsgn */
 
 #ifdef __VSX__
-static vector float __ATTRS_o_ai vec_cpsgn(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_cpsgn(vector float __a,
+                                                      vector float __b) {
   return __builtin_vsx_xvcpsgnsp(__a, __b);
 }
 
-static vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
-                                            vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
+                                                       vector double __b) {
   return __builtin_vsx_xvcpsgndp(__a, __b);
 }
 #endif
 
 /* vec_ctf */
 
-static vector float __ATTRS_o_ai vec_ctf(vector int __a, int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_ctf(vector int __a, int __b) {
   return __builtin_altivec_vcfsx(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_ctf(vector unsigned int __a, int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_ctf(vector unsigned int __a,
+                                                    int __b) {
   return __builtin_altivec_vcfux((vector int)__a, __b);
 }
 
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai
+vec_ctf(vector unsigned long long __a, int __b) {
+  vector double __ret = __builtin_convertvector(__a, vector double);
+  __ret *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+  return __ret;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_ctf(vector signed long long __a, int __b) {
+  vector double __ret = __builtin_convertvector(__a, vector double);
+  __ret *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+  return __ret;
+}
+#endif
+
 /* vec_vcfsx */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vcfsx(vector int __a, int __b) {
   return __builtin_altivec_vcfsx(__a, __b);
 }
 
 /* vec_vcfux */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vcfux(vector unsigned int __a, int __b) {
   return __builtin_altivec_vcfux((vector int)__a, __b);
 }
 
 /* vec_cts */
 
-static vector int __attribute__((__always_inline__))
-vec_cts(vector float __a, int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_cts(vector float __a, int __b) {
   return __builtin_altivec_vctsxs(__a, __b);
 }
 
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_cts(vector double __a, int __b) {
+  __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+  return __builtin_convertvector(__a, vector signed long long);
+}
+#endif
+
 /* vec_vctsxs */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_vctsxs(vector float __a, int __b) {
   return __builtin_altivec_vctsxs(__a, __b);
 }
 
 /* vec_ctu */
 
-static vector unsigned int __attribute__((__always_inline__))
-vec_ctu(vector float __a, int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai vec_ctu(vector float __a,
+                                                           int __b) {
   return __builtin_altivec_vctuxs(__a, __b);
 }
 
+#ifdef __VSX__
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_ctu(vector double __a, int __b) {
+  __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+  return __builtin_convertvector(__a, vector unsigned long long);
+}
+#endif
+
 /* vec_vctuxs */
 
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vctuxs(vector float __a, int __b) {
   return __builtin_altivec_vctuxs(__a, __b);
 }
@@ -1936,13 +2019,15 @@
 /* vec_double */
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_double (vector signed long long __a) {
-  vector double __ret = { __a[0], __a[1] };
+static __inline__ vector double __ATTRS_o_ai
+vec_double(vector signed long long __a) {
+  vector double __ret = {__a[0], __a[1]};
   return __ret;
 }
 
-static vector double __ATTRS_o_ai vec_double (vector unsigned long long __a) {
-  vector double __ret = { __a[0], __a[1] };
+static __inline__ vector double __ATTRS_o_ai
+vec_double(vector unsigned long long __a) {
+  vector double __ret = {__a[0], __a[1]};
   return __ret;
 }
 #endif
@@ -1952,178 +2037,172 @@
 /* Integer vector divides (vectors are scalarized, elements divided
    and the vectors reassembled).
 */
-static vector signed char __ATTRS_o_ai vec_div(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_div(vector signed char __a, vector signed char __b) {
   return __a / __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_div(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_div(vector unsigned char __a, vector unsigned char __b) {
   return __a / __b;
 }
 
-static vector signed short __ATTRS_o_ai vec_div(vector signed short __a,
-                                                vector signed short __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_div(vector signed short __a, vector signed short __b) {
   return __a / __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_div(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_div(vector unsigned short __a, vector unsigned short __b) {
   return __a / __b;
 }
 
-static vector signed int __ATTRS_o_ai vec_div(vector signed int __a,
-                                              vector signed int __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_div(vector signed int __a, vector signed int __b) {
   return __a / __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_div(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_div(vector unsigned int __a, vector unsigned int __b) {
   return __a / __b;
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_div(vector signed long long __a, vector signed long long __b) {
   return __a / __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_div(vector unsigned long long __a, vector unsigned long long __b) {
   return __a / __b;
 }
 
-static vector float __ATTRS_o_ai vec_div(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_div(vector float __a,
+                                                    vector float __b) {
   return __a / __b;
 }
 
-static vector double __ATTRS_o_ai vec_div(vector double __a,
-                                          vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_div(vector double __a,
+                                                     vector double __b) {
   return __a / __b;
 }
 #endif
 
 /* vec_dss */
 
-static void __attribute__((__always_inline__)) vec_dss(int __a) {
+static __inline__ void __attribute__((__always_inline__)) vec_dss(int __a) {
   __builtin_altivec_dss(__a);
 }
 
 /* vec_dssall */
 
-static void __attribute__((__always_inline__)) vec_dssall(void) {
+static __inline__ void __attribute__((__always_inline__)) vec_dssall(void) {
   __builtin_altivec_dssall();
 }
 
 /* vec_dst */
-
-static void __attribute__((__always_inline__))
-vec_dst(const void *__a, int __b, int __c) {
-  __builtin_altivec_dst(__a, __b, __c);
-}
+#define vec_dst(__PTR, __CW, __STR) \
+  __extension__(                    \
+      { __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR)); })
 
 /* vec_dstst */
-
-static void __attribute__((__always_inline__))
-vec_dstst(const void *__a, int __b, int __c) {
-  __builtin_altivec_dstst(__a, __b, __c);
-}
+#define vec_dstst(__PTR, __CW, __STR) \
+  __extension__(                      \
+      { __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR)); })
 
 /* vec_dststt */
-
-static void __attribute__((__always_inline__))
-vec_dststt(const void *__a, int __b, int __c) {
-  __builtin_altivec_dststt(__a, __b, __c);
-}
+#define vec_dststt(__PTR, __CW, __STR) \
+  __extension__(                       \
+      { __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR)); })
 
 /* vec_dstt */
-
-static void __attribute__((__always_inline__))
-vec_dstt(const void *__a, int __b, int __c) {
-  __builtin_altivec_dstt(__a, __b, __c);
-}
+#define vec_dstt(__PTR, __CW, __STR) \
+  __extension__(                     \
+      { __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR)); })
 
 /* vec_eqv */
 
 #ifdef __POWER8_VECTOR__
-static vector signed char __ATTRS_o_ai vec_eqv(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_eqv(vector signed char __a, vector signed char __b) {
   return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
                                                   (vector unsigned int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_eqv(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_eqv(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
                                                     (vector unsigned int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
-                                             vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
+                                                        vector bool char __b) {
   return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a,
                                                 (vector unsigned int)__b);
 }
 
-static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
-                                                vector signed short __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_eqv(vector signed short __a, vector signed short __b) {
   return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
                                                    (vector unsigned int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_eqv(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_eqv(vector unsigned short __a, vector unsigned short __b) {
   return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
                                                      (vector unsigned int)__b);
 }
 
-static vector bool short __ATTRS_o_ai vec_eqv(vector bool short __a,
-                                              vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_eqv(vector bool short __a, vector bool short __b) {
   return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a,
                                                  (vector unsigned int)__b);
 }
 
-static vector signed int __ATTRS_o_ai vec_eqv(vector signed int __a,
-                                              vector signed int __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_eqv(vector signed int __a, vector signed int __b) {
   return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
                                                  (vector unsigned int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_eqv(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_eqv(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_vsx_xxleqv(__a, __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
-                                            vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
+                                                       vector bool int __b) {
   return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                 (vector unsigned int)__b);
+                                               (vector unsigned int)__b);
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_eqv(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)
-    __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+  return (vector signed long long)__builtin_vsx_xxleqv(
+      (vector unsigned int)__a, (vector unsigned int)__b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)
-    __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+  return (vector unsigned long long)__builtin_vsx_xxleqv(
+      (vector unsigned int)__a, (vector unsigned int)__b);
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_eqv(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)
-    __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+  return (vector bool long long)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                     (vector unsigned int)__b);
 }
 
-static vector float __ATTRS_o_ai vec_eqv(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_eqv(vector float __a,
+                                                    vector float __b) {
   return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
                                             (vector unsigned int)__b);
 }
 
-static vector double __ATTRS_o_ai vec_eqv(vector double __a,
-                                          vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_eqv(vector double __a,
+                                                     vector double __b) {
   return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
                                              (vector unsigned int)__b);
 }
@@ -2131,21 +2210,21 @@
 
 /* vec_expte */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_expte(vector float __a) {
   return __builtin_altivec_vexptefp(__a);
 }
 
 /* vec_vexptefp */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vexptefp(vector float __a) {
   return __builtin_altivec_vexptefp(__a);
 }
 
 /* vec_floor */
 
-static vector float __ATTRS_o_ai vec_floor(vector float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_floor(vector float __a) {
 #ifdef __VSX__
   return __builtin_vsx_xvrspim(__a);
 #else
@@ -2154,439 +2233,460 @@
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_floor(vector double __a) {
+static __inline__ vector double __ATTRS_o_ai vec_floor(vector double __a) {
   return __builtin_vsx_xvrdpim(__a);
 }
 #endif
 
 /* vec_vrfim */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vrfim(vector float __a) {
   return __builtin_altivec_vrfim(__a);
 }
 
 /* vec_ld */
 
-static vector signed char __ATTRS_o_ai vec_ld(int __a,
-                                              const vector signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_ld(int __a, const vector signed char *__b) {
   return (vector signed char)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_ld(int __a, const signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_ld(int __a, const signed char *__b) {
   return (vector signed char)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 vec_ld(int __a, const vector unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_ld(int __a,
-                                                const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_ld(int __a, const unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector bool char __ATTRS_o_ai vec_ld(int __a,
-                                            const vector bool char *__b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_ld(int __a, const vector bool char *__b) {
   return (vector bool char)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_ld(int __a, const vector short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_ld(int __a,
+                                                   const vector short *__b) {
   return (vector short)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_ld(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_ld(int __a, const short *__b) {
   return (vector short)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_ld(int __a, const vector unsigned short *__b) {
   return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_ld(int __a,
-                                                 const unsigned short *__b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_ld(int __a, const unsigned short *__b) {
   return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector bool short __ATTRS_o_ai vec_ld(int __a,
-                                             const vector bool short *__b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_ld(int __a, const vector bool short *__b) {
   return (vector bool short)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector pixel __ATTRS_o_ai vec_ld(int __a, const vector pixel *__b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_ld(int __a,
+                                                   const vector pixel *__b) {
   return (vector pixel)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_ld(int __a, const vector int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_ld(int __a,
+                                                 const vector int *__b) {
   return (vector int)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_ld(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_ld(int __a, const int *__b) {
   return (vector int)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_ld(int __a,
-                                               const vector unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_ld(int __a, const vector unsigned int *__b) {
   return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_ld(int __a,
-                                               const unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_ld(int __a, const unsigned int *__b) {
   return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_ld(int __a,
-                                           const vector bool int *__b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_ld(int __a, const vector bool int *__b) {
   return (vector bool int)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_ld(int __a, const vector float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_ld(int __a,
+                                                   const vector float *__b) {
   return (vector float)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_ld(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_ld(int __a, const float *__b) {
   return (vector float)__builtin_altivec_lvx(__a, __b);
 }
 
 /* vec_lvx */
 
-static vector signed char __ATTRS_o_ai vec_lvx(int __a,
-                                               const vector signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvx(int __a, const vector signed char *__b) {
   return (vector signed char)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_lvx(int __a,
-                                               const signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvx(int __a, const signed char *__b) {
   return (vector signed char)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 vec_lvx(int __a, const vector unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_lvx(int __a,
-                                                 const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvx(int __a, const unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector bool char __ATTRS_o_ai vec_lvx(int __a,
-                                             const vector bool char *__b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvx(int __a, const vector bool char *__b) {
   return (vector bool char)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_lvx(int __a, const vector short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a,
+                                                    const vector short *__b) {
   return (vector short)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_lvx(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a, const short *__b) {
   return (vector short)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_lvx(int __a, const vector unsigned short *__b) {
   return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_lvx(int __a,
-                                                  const unsigned short *__b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvx(int __a, const unsigned short *__b) {
   return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector bool short __ATTRS_o_ai vec_lvx(int __a,
-                                              const vector bool short *__b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvx(int __a, const vector bool short *__b) {
   return (vector bool short)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector pixel __ATTRS_o_ai vec_lvx(int __a, const vector pixel *__b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_lvx(int __a,
+                                                    const vector pixel *__b) {
   return (vector pixel)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_lvx(int __a, const vector int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a,
+                                                  const vector int *__b) {
   return (vector int)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_lvx(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a, const int *__b) {
   return (vector int)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_lvx(int __a, const vector unsigned int *__b) {
   return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_lvx(int __a,
-                                                const unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvx(int __a, const unsigned int *__b) {
   return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_lvx(int __a,
-                                            const vector bool int *__b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvx(int __a, const vector bool int *__b) {
   return (vector bool int)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_lvx(int __a, const vector float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a,
+                                                    const vector float *__b) {
   return (vector float)__builtin_altivec_lvx(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_lvx(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a, const float *__b) {
   return (vector float)__builtin_altivec_lvx(__a, __b);
 }
 
 /* vec_lde */
 
-static vector signed char __ATTRS_o_ai vec_lde(int __a,
-                                               const signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lde(int __a, const signed char *__b) {
   return (vector signed char)__builtin_altivec_lvebx(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_lde(int __a,
-                                                 const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lde(int __a, const unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_lde(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lde(int __a, const short *__b) {
   return (vector short)__builtin_altivec_lvehx(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_lde(int __a,
-                                                  const unsigned short *__b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lde(int __a, const unsigned short *__b) {
   return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_lde(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lde(int __a, const int *__b) {
   return (vector int)__builtin_altivec_lvewx(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_lde(int __a,
-                                                const unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lde(int __a, const unsigned int *__b) {
   return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_lde(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lde(int __a, const float *__b) {
   return (vector float)__builtin_altivec_lvewx(__a, __b);
 }
 
 /* vec_lvebx */
 
-static vector signed char __ATTRS_o_ai vec_lvebx(int __a,
-                                                 const signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvebx(int __a, const signed char *__b) {
   return (vector signed char)__builtin_altivec_lvebx(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_lvebx(int __a,
-                                                   const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvebx(int __a, const unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
 }
 
 /* vec_lvehx */
 
-static vector short __ATTRS_o_ai vec_lvehx(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvehx(int __a,
+                                                      const short *__b) {
   return (vector short)__builtin_altivec_lvehx(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_lvehx(int __a,
-                                                    const unsigned short *__b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvehx(int __a, const unsigned short *__b) {
   return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
 }
 
 /* vec_lvewx */
 
-static vector int __ATTRS_o_ai vec_lvewx(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvewx(int __a, const int *__b) {
   return (vector int)__builtin_altivec_lvewx(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_lvewx(int __a,
-                                                  const unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvewx(int __a, const unsigned int *__b) {
   return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_lvewx(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvewx(int __a,
+                                                      const float *__b) {
   return (vector float)__builtin_altivec_lvewx(__a, __b);
 }
 
 /* vec_ldl */
 
-static vector signed char __ATTRS_o_ai vec_ldl(int __a,
-                                               const vector signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_ldl(int __a, const vector signed char *__b) {
   return (vector signed char)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_ldl(int __a,
-                                               const signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_ldl(int __a, const signed char *__b) {
   return (vector signed char)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 vec_ldl(int __a, const vector unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_ldl(int __a,
-                                                 const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_ldl(int __a, const unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector bool char __ATTRS_o_ai vec_ldl(int __a,
-                                             const vector bool char *__b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_ldl(int __a, const vector bool char *__b) {
   return (vector bool char)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_ldl(int __a, const vector short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a,
+                                                    const vector short *__b) {
   return (vector short)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_ldl(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a, const short *__b) {
   return (vector short)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_ldl(int __a, const vector unsigned short *__b) {
   return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_ldl(int __a,
-                                                  const unsigned short *__b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_ldl(int __a, const unsigned short *__b) {
   return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector bool short __ATTRS_o_ai vec_ldl(int __a,
-                                              const vector bool short *__b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_ldl(int __a, const vector bool short *__b) {
   return (vector bool short)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector pixel __ATTRS_o_ai vec_ldl(int __a, const vector pixel *__b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_ldl(int __a,
+                                                    const vector pixel *__b) {
   return (vector pixel)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_ldl(int __a, const vector int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a,
+                                                  const vector int *__b) {
   return (vector int)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_ldl(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a, const int *__b) {
   return (vector int)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_ldl(int __a, const vector unsigned int *__b) {
   return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_ldl(int __a,
-                                                const unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_ldl(int __a, const unsigned int *__b) {
   return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_ldl(int __a,
-                                            const vector bool int *__b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_ldl(int __a, const vector bool int *__b) {
   return (vector bool int)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_ldl(int __a, const vector float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a,
+                                                    const vector float *__b) {
   return (vector float)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_ldl(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a, const float *__b) {
   return (vector float)__builtin_altivec_lvxl(__a, __b);
 }
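/*
 * Illustrative usage sketch, not part of the patched header: how the vec_ld,
 * vec_ldl and vec_st overloads above are typically called.  Assumes an
 * AltiVec-enabled PowerPC target (compiled with -maltivec, <altivec.h>
 * included) and 16-byte-aligned `in`/`out` buffers of at least eight floats;
 * the helper name add8 is hypothetical.
 */
static void add8(const float *in, float *out) {
  vector float lo = vec_ld(0, in);   /* elements 0..3 of in                   */
  vector float hi = vec_ldl(16, in); /* elements 4..7; cache line marked LRU  */
  vec_st(vec_add(lo, hi), 0, out);   /* out[0..3] = in[0..3] + in[4..7]       */
}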
 
 /* vec_lvxl */
 
-static vector signed char __ATTRS_o_ai vec_lvxl(int __a,
-                                                const vector signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvxl(int __a, const vector signed char *__b) {
   return (vector signed char)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_lvxl(int __a,
-                                                const signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvxl(int __a, const signed char *__b) {
   return (vector signed char)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 vec_lvxl(int __a, const vector unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_lvxl(int __a,
-                                                  const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvxl(int __a, const unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector bool char __ATTRS_o_ai vec_lvxl(int __a,
-                                              const vector bool char *__b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvxl(int __a, const vector bool char *__b) {
   return (vector bool char)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_lvxl(int __a, const vector short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
+                                                     const vector short *__b) {
   return (vector short)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_lvxl(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
+                                                     const short *__b) {
   return (vector short)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_lvxl(int __a, const vector unsigned short *__b) {
   return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_lvxl(int __a,
-                                                   const unsigned short *__b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvxl(int __a, const unsigned short *__b) {
   return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector bool short __ATTRS_o_ai vec_lvxl(int __a,
-                                               const vector bool short *__b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvxl(int __a, const vector bool short *__b) {
   return (vector bool short)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector pixel __ATTRS_o_ai vec_lvxl(int __a, const vector pixel *__b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_lvxl(int __a,
+                                                     const vector pixel *__b) {
   return (vector pixel)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_lvxl(int __a, const vector int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a,
+                                                   const vector int *__b) {
   return (vector int)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_lvxl(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a, const int *__b) {
   return (vector int)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_lvxl(int __a, const vector unsigned int *__b) {
   return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_lvxl(int __a,
-                                                 const unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvxl(int __a, const unsigned int *__b) {
   return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_lvxl(int __a,
-                                             const vector bool int *__b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvxl(int __a, const vector bool int *__b) {
   return (vector bool int)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_lvxl(int __a, const vector float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
+                                                     const vector float *__b) {
   return (vector float)__builtin_altivec_lvxl(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_lvxl(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
+                                                     const float *__b) {
   return (vector float)__builtin_altivec_lvxl(__a, __b);
 }
 
 /* vec_loge */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_loge(vector float __a) {
   return __builtin_altivec_vlogefp(__a);
 }
 
 /* vec_vlogefp */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vlogefp(vector float __a) {
   return __builtin_altivec_vlogefp(__a);
 }
@@ -2594,7 +2694,7 @@
 /* vec_lvsl */
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsl(int __a, const signed char *__b) {
   vector unsigned char mask =
@@ -2604,14 +2704,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                  const signed char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsl(int __a, const signed char *__b) {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) {
   vector unsigned char mask =
@@ -2621,14 +2721,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                  const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsl(int __a, const unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsl(int __a, const short *__b) {
   vector unsigned char mask =
@@ -2638,13 +2738,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, const short *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+                                                             const short *__b) {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) {
   vector unsigned char mask =
@@ -2654,14 +2755,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                  const unsigned short *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsl(int __a, const unsigned short *__b) {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsl(int __a, const int *__b) {
   vector unsigned char mask =
@@ -2671,13 +2772,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, const int *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+                                                             const int *__b) {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) {
   vector unsigned char mask =
@@ -2687,14 +2789,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                  const unsigned int *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsl(int __a, const unsigned int *__b) {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsl(int __a, const float *__b) {
   vector unsigned char mask =
@@ -2704,7 +2806,8 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, const float *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+                                                             const float *__b) {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
 #endif
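/*
 * Illustrative sketch, not part of the patched header: the classic vec_lvsl /
 * vec_ld / vec_perm idiom for reading 16 bytes from a possibly unaligned
 * address on big-endian targets, next to the plain assignment that the
 * deprecation message above recommends on little-endian targets.  The helper
 * name load_unaligned is hypothetical.
 */
static vector unsigned char load_unaligned(const unsigned char *p) {
#ifdef __LITTLE_ENDIAN__
  return *(const vector unsigned char *)p;   /* let the compiler do the fix-up */
#else
  vector unsigned char lo = vec_ld(0, p);    /* aligned block containing p     */
  vector unsigned char hi = vec_ld(15, p);   /* next aligned block             */
  vector unsigned char sel = vec_lvsl(0, p); /* shift mask from p's offset     */
  return vec_perm(lo, hi, sel);              /* splice out the 16 bytes at p   */
#endif
}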
@@ -2712,7 +2815,7 @@
 /* vec_lvsr */
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsr(int __a, const signed char *__b) {
   vector unsigned char mask =
@@ -2722,14 +2825,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                  const signed char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsr(int __a, const signed char *__b) {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) {
   vector unsigned char mask =
@@ -2739,14 +2842,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                  const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsr(int __a, const unsigned char *__b) {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsr(int __a, const short *__b) {
   vector unsigned char mask =
@@ -2756,13 +2859,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const short *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+                                                             const short *__b) {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) {
   vector unsigned char mask =
@@ -2772,14 +2876,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                  const unsigned short *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsr(int __a, const unsigned short *__b) {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsr(int __a, const int *__b) {
   vector unsigned char mask =
@@ -2789,13 +2893,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const int *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+                                                             const int *__b) {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) {
   vector unsigned char mask =
@@ -2805,14 +2910,14 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                  const unsigned int *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsr(int __a, const unsigned int *__b) {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
     __attribute__((__deprecated__("use assignment for unaligned little endian \
 loads/stores"))) vec_lvsr(int __a, const float *__b) {
   vector unsigned char mask =
@@ -2822,47 +2927,48 @@
   return vec_perm(mask, mask, reverse);
 }
 #else
-static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const float *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+                                                             const float *__b) {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
 #endif
 
 /* vec_madd */
-static vector signed short __ATTRS_o_ai
+static __inline__ vector signed short __ATTRS_o_ai
 vec_mladd(vector signed short, vector signed short, vector signed short);
-static vector signed short __ATTRS_o_ai
+static __inline__ vector signed short __ATTRS_o_ai
 vec_mladd(vector signed short, vector unsigned short, vector unsigned short);
-static vector signed short __ATTRS_o_ai
+static __inline__ vector signed short __ATTRS_o_ai
 vec_mladd(vector unsigned short, vector signed short, vector signed short);
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short);
 
-static vector signed short __ATTRS_o_ai
-vec_madd(vector signed short __a, vector signed short __b,
-         vector signed short __c) {
-  return  vec_mladd(__a, __b, __c);
+static __inline__ vector signed short __ATTRS_o_ai vec_madd(
+    vector signed short __a, vector signed short __b, vector signed short __c) {
+  return vec_mladd(__a, __b, __c);
 }
 
-static vector signed short __ATTRS_o_ai
+static __inline__ vector signed short __ATTRS_o_ai
 vec_madd(vector signed short __a, vector unsigned short __b,
          vector unsigned short __c) {
   return vec_mladd(__a, __b, __c);
 }
 
-static vector signed short __ATTRS_o_ai
+static __inline__ vector signed short __ATTRS_o_ai
 vec_madd(vector unsigned short __a, vector signed short __b,
          vector signed short __c) {
   return vec_mladd(__a, __b, __c);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_madd(vector unsigned short __a, vector unsigned short __b,
          vector unsigned short __c) {
   return vec_mladd(__a, __b, __c);
 }
 
-static vector float __ATTRS_o_ai
-vec_madd(vector float __a, vector float __b, vector float __c) {
+static __inline__ vector float __ATTRS_o_ai vec_madd(vector float __a,
+                                                     vector float __b,
+                                                     vector float __c) {
 #ifdef __VSX__
   return __builtin_vsx_xvmaddasp(__a, __b, __c);
 #else
@@ -2871,29 +2977,30 @@
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai
-vec_madd(vector double __a, vector double __b, vector double __c) {
+static __inline__ vector double __ATTRS_o_ai vec_madd(vector double __a,
+                                                      vector double __b,
+                                                      vector double __c) {
   return __builtin_vsx_xvmaddadp(__a, __b, __c);
 }
 #endif
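/*
 * Illustrative sketch, not part of the patched header: vec_madd computes the
 * element-wise multiply-add a*b + c, mapping to xvmaddasp/xvmaddadp under VSX
 * and to vmaddfp otherwise, as the overloads above show.  The helper names
 * fma4 and fma2 are hypothetical.
 */
static vector float fma4(vector float a, vector float b, vector float c) {
  return vec_madd(a, b, c);                  /* { a[i]*b[i] + c[i] } */
}
#ifdef __VSX__
static vector double fma2(vector double a, vector double b, vector double c) {
  return vec_madd(a, b, c);
}
#endif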
 
 /* vec_vmaddfp */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vmaddfp(vector float __a, vector float __b, vector float __c) {
   return __builtin_altivec_vmaddfp(__a, __b, __c);
 }
 
 /* vec_madds */
 
-static vector signed short __attribute__((__always_inline__))
+static __inline__ vector signed short __attribute__((__always_inline__))
 vec_madds(vector signed short __a, vector signed short __b,
           vector signed short __c) {
   return __builtin_altivec_vmhaddshs(__a, __b, __c);
 }
 
 /* vec_vmhaddshs */
-static vector signed short __attribute__((__always_inline__))
+static __inline__ vector signed short __attribute__((__always_inline__))
 vec_vmhaddshs(vector signed short __a, vector signed short __b,
               vector signed short __c) {
   return __builtin_altivec_vmhaddshs(__a, __b, __c);
@@ -2902,138 +3009,145 @@
 /* vec_msub */
 
 #ifdef __VSX__
-static vector float __ATTRS_o_ai
-vec_msub(vector float __a, vector float __b, vector float __c) {
+static __inline__ vector float __ATTRS_o_ai vec_msub(vector float __a,
+                                                     vector float __b,
+                                                     vector float __c) {
   return __builtin_vsx_xvmsubasp(__a, __b, __c);
 }
 
-static vector double __ATTRS_o_ai
-vec_msub(vector double __a, vector double __b, vector double __c) {
+static __inline__ vector double __ATTRS_o_ai vec_msub(vector double __a,
+                                                      vector double __b,
+                                                      vector double __c) {
   return __builtin_vsx_xvmsubadp(__a, __b, __c);
 }
 #endif
 
 /* vec_max */
 
-static vector signed char __ATTRS_o_ai vec_max(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_max(vector signed char __a, vector signed char __b) {
   return __builtin_altivec_vmaxsb(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_max(vector bool char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_max(vector bool char __a, vector signed char __b) {
   return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_max(vector signed char __a,
-                                               vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_max(vector signed char __a, vector bool char __b) {
   return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_max(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_max(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vmaxub(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_max(vector bool char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_max(vector bool char __a, vector unsigned char __b) {
   return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_max(vector unsigned char __a,
-                                                 vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_max(vector unsigned char __a, vector bool char __b) {
   return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
 }
 
-static vector short __ATTRS_o_ai vec_max(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
+                                                    vector short __b) {
   return __builtin_altivec_vmaxsh(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_max(vector bool short __a,
-                                         vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_max(vector bool short __a,
+                                                    vector short __b) {
   return __builtin_altivec_vmaxsh((vector short)__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_max(vector short __a,
-                                         vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
+                                                    vector bool short __b) {
   return __builtin_altivec_vmaxsh(__a, (vector short)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_max(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_max(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_vmaxuh(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_max(vector bool short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_max(vector bool short __a, vector unsigned short __b) {
   return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_max(vector unsigned short __a,
-                                                  vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_max(vector unsigned short __a, vector bool short __b) {
   return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
 }
 
-static vector int __ATTRS_o_ai vec_max(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
+                                                  vector int __b) {
   return __builtin_altivec_vmaxsw(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_max(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_max(vector bool int __a,
+                                                  vector int __b) {
   return __builtin_altivec_vmaxsw((vector int)__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_max(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
+                                                  vector bool int __b) {
   return __builtin_altivec_vmaxsw(__a, (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_max(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_max(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vmaxuw(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_max(vector bool int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_max(vector bool int __a, vector unsigned int __b) {
   return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_max(vector unsigned int __a,
-                                                vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_max(vector unsigned int __a, vector bool int __b) {
   return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_max(vector signed long long __a, vector signed long long __b) {
   return __builtin_altivec_vmaxsd(__a, __b);
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_max(vector bool long long __a, vector signed long long __b) {
   return __builtin_altivec_vmaxsd((vector signed long long)__a, __b);
 }
 
-static vector signed long long __ATTRS_o_ai vec_max(vector signed long long __a,
-                                                    vector bool long long __b) {
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_max(vector signed long long __a, vector bool long long __b) {
   return __builtin_altivec_vmaxsd(__a, (vector signed long long)__b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_max(vector unsigned long long __a, vector unsigned long long __b) {
   return __builtin_altivec_vmaxud(__a, __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_max(vector bool long long __a, vector unsigned long long __b) {
   return __builtin_altivec_vmaxud((vector unsigned long long)__a, __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_max(vector unsigned long long __a, vector bool long long __b) {
   return __builtin_altivec_vmaxud(__a, (vector unsigned long long)__b);
 }
 #endif
 
-static vector float __ATTRS_o_ai vec_max(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_max(vector float __a,
+                                                    vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvmaxsp(__a, __b);
 #else
@@ -3042,114 +3156,117 @@
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_max(vector double __a,
-                                          vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_max(vector double __a,
+                                                     vector double __b) {
   return __builtin_vsx_xvmaxdp(__a, __b);
 }
 #endif
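/*
 * Illustrative sketch, not part of the patched header: vec_max keeps the
 * larger element in each lane; the mixed bool/element overloads above let a
 * comparison mask participate directly.  The helper names clamp_low and max4
 * are hypothetical.
 */
static vector int clamp_low(vector int v, vector int floor_) {
  return vec_max(v, floor_);                 /* per-lane signed maximum (vmaxsw)    */
}
static vector float max4(vector float a, vector float b) {
  return vec_max(a, b);                      /* xvmaxsp under VSX, vmaxfp otherwise */
}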
 
 /* vec_vmaxsb */
 
-static vector signed char __ATTRS_o_ai vec_vmaxsb(vector signed char __a,
-                                                  vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vmaxsb(vector signed char __a, vector signed char __b) {
   return __builtin_altivec_vmaxsb(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vmaxsb(vector bool char __a,
-                                                  vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vmaxsb(vector bool char __a, vector signed char __b) {
   return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vmaxsb(vector signed char __a,
-                                                  vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vmaxsb(vector signed char __a, vector bool char __b) {
   return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
 }
 
 /* vec_vmaxub */
 
-static vector unsigned char __ATTRS_o_ai vec_vmaxub(vector unsigned char __a,
-                                                    vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vmaxub(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vmaxub(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vmaxub(vector bool char __a,
-                                                    vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vmaxub(vector bool char __a, vector unsigned char __b) {
   return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vmaxub(vector unsigned char __a,
-                                                    vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vmaxub(vector unsigned char __a, vector bool char __b) {
   return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
 }
 
 /* vec_vmaxsh */
 
-static vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
-                                            vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
+                                                       vector short __b) {
   return __builtin_altivec_vmaxsh(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a,
-                                            vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a,
+                                                       vector short __b) {
   return __builtin_altivec_vmaxsh((vector short)__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
-                                            vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
+                                                       vector bool short __b) {
   return __builtin_altivec_vmaxsh(__a, (vector short)__b);
 }
 
 /* vec_vmaxuh */
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vmaxuh(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_vmaxuh(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vmaxuh(vector bool short __a, vector unsigned short __b) {
   return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vmaxuh(vector unsigned short __a,
-                                                     vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector unsigned short __a, vector bool short __b) {
   return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
 }
 
 /* vec_vmaxsw */
 
-static vector int __ATTRS_o_ai vec_vmaxsw(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
+                                                     vector int __b) {
   return __builtin_altivec_vmaxsw(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a,
+                                                     vector int __b) {
   return __builtin_altivec_vmaxsw((vector int)__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_vmaxsw(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
+                                                     vector bool int __b) {
   return __builtin_altivec_vmaxsw(__a, (vector int)__b);
 }
 
 /* vec_vmaxuw */
 
-static vector unsigned int __ATTRS_o_ai vec_vmaxuw(vector unsigned int __a,
-                                                   vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vmaxuw(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vmaxuw(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vmaxuw(vector bool int __a,
-                                                   vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vmaxuw(vector bool int __a, vector unsigned int __b) {
   return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vmaxuw(vector unsigned int __a,
-                                                   vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vmaxuw(vector unsigned int __a, vector bool int __b) {
   return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
 }
 
 /* vec_vmaxfp */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vmaxfp(vector float __a, vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvmaxsp(__a, __b);
@@ -3160,39 +3277,39 @@
 
 /* vec_mergeh */
 
-static vector signed char __ATTRS_o_ai vec_mergeh(vector signed char __a,
-                                                  vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_mergeh(vector signed char __a, vector signed char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
                                          0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
                                          0x06, 0x16, 0x07, 0x17));
 }
 
-static vector unsigned char __ATTRS_o_ai vec_mergeh(vector unsigned char __a,
-                                                    vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
                                          0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
                                          0x06, 0x16, 0x07, 0x17));
 }
 
-static vector bool char __ATTRS_o_ai vec_mergeh(vector bool char __a,
-                                                vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_mergeh(vector bool char __a, vector bool char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
                                          0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
                                          0x06, 0x16, 0x07, 0x17));
 }
 
-static vector short __ATTRS_o_ai vec_mergeh(vector short __a,
-                                            vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_mergeh(vector short __a,
+                                                       vector short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
                                          0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
                                          0x06, 0x07, 0x16, 0x17));
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
@@ -3200,47 +3317,48 @@
                                          0x06, 0x07, 0x16, 0x17));
 }
 
-static vector bool short __ATTRS_o_ai vec_mergeh(vector bool short __a,
-                                                 vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_mergeh(vector bool short __a, vector bool short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
                                          0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
                                          0x06, 0x07, 0x16, 0x17));
 }
 
-static vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a,
-                                            vector pixel __b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a,
+                                                       vector pixel __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
                                          0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
                                          0x06, 0x07, 0x16, 0x17));
 }
 
-static vector int __ATTRS_o_ai vec_mergeh(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_mergeh(vector int __a,
+                                                     vector int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
                                          0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector unsigned int __ATTRS_o_ai vec_mergeh(vector unsigned int __a,
-                                                   vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
                                          0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a,
-                                               vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a,
+                                                          vector bool int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
                                          0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector float __ATTRS_o_ai vec_mergeh(vector float __a,
-                                            vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_mergeh(vector float __a,
+                                                       vector float __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
                                          0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
@@ -3248,91 +3366,81 @@
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_mergeh(vector signed long long __a, vector signed long long __b) {
   return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
-                                         0x04, 0x05, 0x06, 0x07,
-                                         0x10, 0x11, 0x12, 0x13,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_mergeh(vector signed long long __a, vector bool long long __b) {
   return vec_perm(__a, (vector signed long long)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
-                                         0x04, 0x05, 0x06, 0x07,
-                                         0x10, 0x11, 0x12, 0x13,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_mergeh(vector bool long long __a, vector signed long long __b) {
   return vec_perm((vector signed long long)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
-                                         0x04, 0x05, 0x06, 0x07,
-                                         0x10, 0x11, 0x12, 0x13,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
   return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
-                                         0x04, 0x05, 0x06, 0x07,
-                                         0x10, 0x11, 0x12, 0x13,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_mergeh(vector unsigned long long __a, vector bool long long __b) {
   return vec_perm(__a, (vector unsigned long long)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
-                                         0x04, 0x05, 0x06, 0x07,
-                                         0x10, 0x11, 0x12, 0x13,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
   return vec_perm((vector unsigned long long)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
-                                         0x04, 0x05, 0x06, 0x07,
-                                         0x10, 0x11, 0x12, 0x13,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_mergeh(vector bool long long __a, vector bool long long __b) {
   return vec_perm(__a, __b,
-                 (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
-                                        0x04, 0x05, 0x06, 0x07,
-                                        0x10, 0x11, 0x12, 0x13,
-                                        0x14, 0x15, 0x16, 0x17));
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+                                         0x14, 0x15, 0x16, 0x17));
 }
 
-static vector double __ATTRS_o_ai vec_mergeh(vector double __a,
-                                             vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_mergeh(vector double __a,
+                                                        vector double __b) {
   return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
-                                         0x04, 0x05, 0x06, 0x07,
-                                         0x10, 0x11, 0x12, 0x13,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17));
 }
-static vector double __ATTRS_o_ai vec_mergeh(vector double __a,
-                                             vector bool long long __b) {
+static __inline__ vector double __ATTRS_o_ai
+vec_mergeh(vector double __a, vector bool long long __b) {
   return vec_perm(__a, (vector double)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
-                                         0x04, 0x05, 0x06, 0x07,
-                                         0x10, 0x11, 0x12, 0x13,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17));
 }
-static vector double __ATTRS_o_ai vec_mergeh(vector bool long long __a,
-                                             vector double __b) {
+static __inline__ vector double __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector double __b) {
   return vec_perm((vector double)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
-                                         0x04, 0x05, 0x06, 0x07,
-                                         0x10, 0x11, 0x12, 0x13,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17));
 }
 #endif
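/*
 * Illustrative sketch, not part of the patched header: vec_mergeh interleaves
 * the first half of each operand element-wise, e.g. zipping separate real and
 * imaginary lanes into {re[0], im[0], re[1], im[1]}.  The helper name zip_lo
 * is hypothetical.
 */
static vector float zip_lo(vector float re, vector float im) {
  return vec_mergeh(re, im);                 /* { re[0], im[0], re[1], im[1] } */
}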
@@ -3341,24 +3449,24 @@
 
 #define __builtin_altivec_vmrghb vec_vmrghb
 
-static vector signed char __ATTRS_o_ai vec_vmrghb(vector signed char __a,
-                                                  vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vmrghb(vector signed char __a, vector signed char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
                                          0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
                                          0x06, 0x16, 0x07, 0x17));
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vmrghb(vector unsigned char __a,
-                                                    vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vmrghb(vector unsigned char __a, vector unsigned char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
                                          0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
                                          0x06, 0x16, 0x07, 0x17));
 }
 
-static vector bool char __ATTRS_o_ai vec_vmrghb(vector bool char __a,
-                                                vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vmrghb(vector bool char __a, vector bool char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
                                          0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
@@ -3369,15 +3477,15 @@
 
 #define __builtin_altivec_vmrghh vec_vmrghh
 
-static vector short __ATTRS_o_ai vec_vmrghh(vector short __a,
-                                            vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vmrghh(vector short __a,
+                                                       vector short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
                                          0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
                                          0x06, 0x07, 0x16, 0x17));
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vmrghh(vector unsigned short __a, vector unsigned short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
@@ -3385,16 +3493,16 @@
                                          0x06, 0x07, 0x16, 0x17));
 }
 
-static vector bool short __ATTRS_o_ai vec_vmrghh(vector bool short __a,
-                                                 vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vmrghh(vector bool short __a, vector bool short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
                                          0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
                                          0x06, 0x07, 0x16, 0x17));
 }
 
-static vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a,
-                                            vector pixel __b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a,
+                                                       vector pixel __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
                                          0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
@@ -3405,31 +3513,32 @@
 
 #define __builtin_altivec_vmrghw vec_vmrghw
 
-static vector int __ATTRS_o_ai vec_vmrghw(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vmrghw(vector int __a,
+                                                     vector int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
                                          0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vmrghw(vector unsigned int __a,
-                                                   vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vmrghw(vector unsigned int __a, vector unsigned int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
                                          0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a,
-                                               vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a,
+                                                          vector bool int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
                                          0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
                                          0x14, 0x15, 0x16, 0x17));
 }
 
-static vector float __ATTRS_o_ai vec_vmrghw(vector float __a,
-                                            vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vmrghw(vector float __a,
+                                                       vector float __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
                                          0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
@@ -3438,39 +3547,39 @@
 
 /* vec_mergel */
 
-static vector signed char __ATTRS_o_ai vec_mergel(vector signed char __a,
-                                                  vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_mergel(vector signed char __a, vector signed char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
                                          0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
                                          0x0E, 0x1E, 0x0F, 0x1F));
 }
 
-static vector unsigned char __ATTRS_o_ai vec_mergel(vector unsigned char __a,
-                                                    vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_mergel(vector unsigned char __a, vector unsigned char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
                                          0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
                                          0x0E, 0x1E, 0x0F, 0x1F));
 }
 
-static vector bool char __ATTRS_o_ai vec_mergel(vector bool char __a,
-                                                vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_mergel(vector bool char __a, vector bool char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
                                          0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
                                          0x0E, 0x1E, 0x0F, 0x1F));
 }
 
-static vector short __ATTRS_o_ai vec_mergel(vector short __a,
-                                            vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_mergel(vector short __a,
+                                                       vector short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
                                          0x0E, 0x0F, 0x1E, 0x1F));
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_mergel(vector unsigned short __a, vector unsigned short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
@@ -3478,47 +3587,48 @@
                                          0x0E, 0x0F, 0x1E, 0x1F));
 }
 
-static vector bool short __ATTRS_o_ai vec_mergel(vector bool short __a,
-                                                 vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_mergel(vector bool short __a, vector bool short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
                                          0x0E, 0x0F, 0x1E, 0x1F));
 }
 
-static vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a,
-                                            vector pixel __b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a,
+                                                       vector pixel __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
                                          0x0E, 0x0F, 0x1E, 0x1F));
 }
 
-static vector int __ATTRS_o_ai vec_mergel(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_mergel(vector int __a,
+                                                     vector int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
 
-static vector unsigned int __ATTRS_o_ai vec_mergel(vector unsigned int __a,
-                                                   vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mergel(vector unsigned int __a, vector unsigned int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
 
-static vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a,
-                                               vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a,
+                                                          vector bool int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
 
-static vector float __ATTRS_o_ai vec_mergel(vector float __a,
-                                            vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_mergel(vector float __a,
+                                                       vector float __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
@@ -3526,84 +3636,74 @@
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_mergel(vector signed long long __a, vector signed long long __b) {
   return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
-                                         0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x18, 0X19, 0x1A, 0x1B,
+                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+                                         0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_mergel(vector signed long long __a, vector bool long long __b) {
   return vec_perm(__a, (vector signed long long)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
-                                         0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x18, 0X19, 0x1A, 0x1B,
+                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+                                         0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_mergel(vector bool long long __a, vector signed long long __b) {
   return vec_perm((vector signed long long)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
-                                         0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x18, 0X19, 0x1A, 0x1B,
+                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+                                         0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
   return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
-                                         0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x18, 0X19, 0x1A, 0x1B,
+                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+                                         0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_mergel(vector unsigned long long __a, vector bool long long __b) {
   return vec_perm(__a, (vector unsigned long long)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
-                                         0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x18, 0X19, 0x1A, 0x1B,
+                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+                                         0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_mergel(vector bool long long __a, vector unsigned long long __b) {
   return vec_perm((vector unsigned long long)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
-                                         0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x18, 0X19, 0x1A, 0x1B,
+                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+                                         0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_mergel(vector bool long long __a, vector bool long long __b) {
   return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
-                                         0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x18, 0X19, 0x1A, 0x1B,
+                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+                                         0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
-static vector double __ATTRS_o_ai
-vec_mergel(vector double __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_mergel(vector double __a,
+                                                        vector double __b) {
   return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
-                                         0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x18, 0X19, 0x1A, 0x1B,
+                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+                                         0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
-static vector double __ATTRS_o_ai
+static __inline__ vector double __ATTRS_o_ai
 vec_mergel(vector double __a, vector bool long long __b) {
   return vec_perm(__a, (vector double)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
-                                         0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x18, 0X19, 0x1A, 0x1B,
+                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+                                         0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
-static vector double __ATTRS_o_ai
+static __inline__ vector double __ATTRS_o_ai
 vec_mergel(vector bool long long __a, vector double __b) {
   return vec_perm((vector double)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
-                                         0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x18, 0X19, 0x1A, 0x1B,
+                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+                                         0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
 #endif
@@ -3612,24 +3712,24 @@
 
 #define __builtin_altivec_vmrglb vec_vmrglb
 
-static vector signed char __ATTRS_o_ai vec_vmrglb(vector signed char __a,
-                                                  vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vmrglb(vector signed char __a, vector signed char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
                                          0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
                                          0x0E, 0x1E, 0x0F, 0x1F));
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vmrglb(vector unsigned char __a,
-                                                    vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vmrglb(vector unsigned char __a, vector unsigned char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
                                          0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
                                          0x0E, 0x1E, 0x0F, 0x1F));
 }
 
-static vector bool char __ATTRS_o_ai vec_vmrglb(vector bool char __a,
-                                                vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vmrglb(vector bool char __a, vector bool char __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
                                          0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
@@ -3640,15 +3740,15 @@
 
 #define __builtin_altivec_vmrglh vec_vmrglh
 
-static vector short __ATTRS_o_ai vec_vmrglh(vector short __a,
-                                            vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vmrglh(vector short __a,
+                                                       vector short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
                                          0x0E, 0x0F, 0x1E, 0x1F));
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vmrglh(vector unsigned short __a, vector unsigned short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
@@ -3656,16 +3756,16 @@
                                          0x0E, 0x0F, 0x1E, 0x1F));
 }
 
-static vector bool short __ATTRS_o_ai vec_vmrglh(vector bool short __a,
-                                                 vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vmrglh(vector bool short __a, vector bool short __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
                                          0x0E, 0x0F, 0x1E, 0x1F));
 }
 
-static vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a,
-                                            vector pixel __b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a,
+                                                       vector pixel __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
@@ -3676,215 +3776,226 @@
 
 #define __builtin_altivec_vmrglw vec_vmrglw
 
-static vector int __ATTRS_o_ai vec_vmrglw(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vmrglw(vector int __a,
+                                                     vector int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vmrglw(vector unsigned int __a,
-                                                   vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vmrglw(vector unsigned int __a, vector unsigned int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
 
-static vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a,
-                                               vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a,
+                                                          vector bool int __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
 
-static vector float __ATTRS_o_ai vec_vmrglw(vector float __a,
-                                            vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vmrglw(vector float __a,
+                                                       vector float __b) {
   return vec_perm(__a, __b,
                   (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
                                          0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
                                          0x1C, 0x1D, 0x1E, 0x1F));
 }
 
-
 #ifdef __POWER8_VECTOR__
 /* vec_mergee */
 
-static vector bool int __ATTRS_o_ai
-vec_mergee(vector bool int __a, vector bool int __b) {
-  return vec_perm(__a, __b, (vector unsigned char)
-                  (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
-                   0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
+static __inline__ vector bool int __ATTRS_o_ai vec_mergee(vector bool int __a,
+                                                          vector bool int __b) {
+  return vec_perm(__a, __b,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
+                                         0x18, 0x19, 0x1A, 0x1B));
 }
 
-static vector signed int __ATTRS_o_ai
+static __inline__ vector signed int __ATTRS_o_ai
 vec_mergee(vector signed int __a, vector signed int __b) {
-  return vec_perm(__a, __b, (vector unsigned char)
-                  (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
-                   0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
+  return vec_perm(__a, __b,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
+                                         0x18, 0x19, 0x1A, 0x1B));
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_mergee(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b, (vector unsigned char)
-                  (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
-                   0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
+  return vec_perm(__a, __b,
+                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
+                                         0x18, 0x19, 0x1A, 0x1B));
 }
 
 /* vec_mergeo */
 
-static vector bool int  __ATTRS_o_ai
-vec_mergeo(vector bool int __a, vector bool int __b) {
-  return vec_perm(__a, __b, (vector unsigned char)
-                  (0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
-                   0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+static __inline__ vector bool int __ATTRS_o_ai vec_mergeo(vector bool int __a,
+                                                          vector bool int __b) {
+  return vec_perm(__a, __b,
+                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
+                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x1C, 0x1D, 0x1E, 0x1F));
 }
 
-static vector signed int  __ATTRS_o_ai
+static __inline__ vector signed int __ATTRS_o_ai
 vec_mergeo(vector signed int __a, vector signed int __b) {
-  return vec_perm(__a, __b, (vector unsigned char)
-                  (0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
-                   0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+  return vec_perm(__a, __b,
+                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
+                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x1C, 0x1D, 0x1E, 0x1F));
 }
 
-static vector unsigned int  __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_mergeo(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b, (vector unsigned char)
-                  (0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
-                   0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+  return vec_perm(__a, __b,
+                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
+                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x1C, 0x1D, 0x1E, 0x1F));
 }
 
 #endif
 
 /* vec_mfvscr */
 
-static vector unsigned short __attribute__((__always_inline__))
+static __inline__ vector unsigned short __attribute__((__always_inline__))
 vec_mfvscr(void) {
   return __builtin_altivec_mfvscr();
 }
 
 /* vec_min */
 
-static vector signed char __ATTRS_o_ai vec_min(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_min(vector signed char __a, vector signed char __b) {
   return __builtin_altivec_vminsb(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_min(vector bool char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_min(vector bool char __a, vector signed char __b) {
   return __builtin_altivec_vminsb((vector signed char)__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_min(vector signed char __a,
-                                               vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_min(vector signed char __a, vector bool char __b) {
   return __builtin_altivec_vminsb(__a, (vector signed char)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_min(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_min(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vminub(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_min(vector bool char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_min(vector bool char __a, vector unsigned char __b) {
   return __builtin_altivec_vminub((vector unsigned char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_min(vector unsigned char __a,
-                                                 vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_min(vector unsigned char __a, vector bool char __b) {
   return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
 }
 
-static vector short __ATTRS_o_ai vec_min(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
+                                                    vector short __b) {
   return __builtin_altivec_vminsh(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_min(vector bool short __a,
-                                         vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_min(vector bool short __a,
+                                                    vector short __b) {
   return __builtin_altivec_vminsh((vector short)__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_min(vector short __a,
-                                         vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
+                                                    vector bool short __b) {
   return __builtin_altivec_vminsh(__a, (vector short)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_min(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_min(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_vminuh(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_min(vector bool short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_min(vector bool short __a, vector unsigned short __b) {
   return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_min(vector unsigned short __a,
-                                                  vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_min(vector unsigned short __a, vector bool short __b) {
   return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
 }
 
-static vector int __ATTRS_o_ai vec_min(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
+                                                  vector int __b) {
   return __builtin_altivec_vminsw(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_min(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_min(vector bool int __a,
+                                                  vector int __b) {
   return __builtin_altivec_vminsw((vector int)__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_min(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
+                                                  vector bool int __b) {
   return __builtin_altivec_vminsw(__a, (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_min(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_min(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vminuw(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_min(vector bool int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_min(vector bool int __a, vector unsigned int __b) {
   return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_min(vector unsigned int __a,
-                                                vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_min(vector unsigned int __a, vector bool int __b) {
   return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_min(vector signed long long __a, vector signed long long __b) {
   return __builtin_altivec_vminsd(__a, __b);
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_min(vector bool long long __a, vector signed long long __b) {
   return __builtin_altivec_vminsd((vector signed long long)__a, __b);
 }
 
-static vector signed long long __ATTRS_o_ai vec_min(vector signed long long __a,
-                                                    vector bool long long __b) {
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_min(vector signed long long __a, vector bool long long __b) {
   return __builtin_altivec_vminsd(__a, (vector signed long long)__b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_min(vector unsigned long long __a, vector unsigned long long __b) {
   return __builtin_altivec_vminud(__a, __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_min(vector bool long long __a, vector unsigned long long __b) {
   return __builtin_altivec_vminud((vector unsigned long long)__a, __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_min(vector unsigned long long __a, vector bool long long __b) {
   return __builtin_altivec_vminud(__a, (vector unsigned long long)__b);
 }
 #endif
 
-static vector float __ATTRS_o_ai vec_min(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_min(vector float __a,
+                                                    vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvminsp(__a, __b);
 #else
@@ -3893,114 +4004,117 @@
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_min(vector double __a,
-                                          vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_min(vector double __a,
+                                                     vector double __b) {
   return __builtin_vsx_xvmindp(__a, __b);
 }
 #endif
 
 /* vec_vminsb */
 
-static vector signed char __ATTRS_o_ai vec_vminsb(vector signed char __a,
-                                                  vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vminsb(vector signed char __a, vector signed char __b) {
   return __builtin_altivec_vminsb(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vminsb(vector bool char __a,
-                                                  vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vminsb(vector bool char __a, vector signed char __b) {
   return __builtin_altivec_vminsb((vector signed char)__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vminsb(vector signed char __a,
-                                                  vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vminsb(vector signed char __a, vector bool char __b) {
   return __builtin_altivec_vminsb(__a, (vector signed char)__b);
 }
 
 /* vec_vminub */
 
-static vector unsigned char __ATTRS_o_ai vec_vminub(vector unsigned char __a,
-                                                    vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vminub(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vminub(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vminub(vector bool char __a,
-                                                    vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vminub(vector bool char __a, vector unsigned char __b) {
   return __builtin_altivec_vminub((vector unsigned char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vminub(vector unsigned char __a,
-                                                    vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vminub(vector unsigned char __a, vector bool char __b) {
   return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
 }
 
 /* vec_vminsh */
 
-static vector short __ATTRS_o_ai vec_vminsh(vector short __a,
-                                            vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
+                                                       vector short __b) {
   return __builtin_altivec_vminsh(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_vminsh(vector bool short __a,
-                                            vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector bool short __a,
+                                                       vector short __b) {
   return __builtin_altivec_vminsh((vector short)__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_vminsh(vector short __a,
-                                            vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
+                                                       vector bool short __b) {
   return __builtin_altivec_vminsh(__a, (vector short)__b);
 }
 
 /* vec_vminuh */
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vminuh(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_vminuh(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vminuh(vector bool short __a, vector unsigned short __b) {
   return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vminuh(vector unsigned short __a,
-                                                     vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector unsigned short __a, vector bool short __b) {
   return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
 }
 
 /* vec_vminsw */
 
-static vector int __ATTRS_o_ai vec_vminsw(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
+                                                     vector int __b) {
   return __builtin_altivec_vminsw(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_vminsw(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector bool int __a,
+                                                     vector int __b) {
   return __builtin_altivec_vminsw((vector int)__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_vminsw(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
+                                                     vector bool int __b) {
   return __builtin_altivec_vminsw(__a, (vector int)__b);
 }
 
 /* vec_vminuw */
 
-static vector unsigned int __ATTRS_o_ai vec_vminuw(vector unsigned int __a,
-                                                   vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vminuw(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vminuw(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vminuw(vector bool int __a,
-                                                   vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vminuw(vector bool int __a, vector unsigned int __b) {
   return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vminuw(vector unsigned int __a,
-                                                   vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vminuw(vector unsigned int __a, vector bool int __b) {
   return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
 }
 
 /* vec_vminfp */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vminfp(vector float __a, vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvminsp(__a, __b);
@@ -4013,49 +4127,48 @@
 
 #define __builtin_altivec_vmladduhm vec_mladd
 
-static vector short __ATTRS_o_ai vec_mladd(vector short __a, vector short __b,
-                                           vector short __c) {
+static __inline__ vector short __ATTRS_o_ai vec_mladd(vector short __a,
+                                                      vector short __b,
+                                                      vector short __c) {
   return __a * __b + __c;
 }
 
-static vector short __ATTRS_o_ai vec_mladd(vector short __a,
-                                           vector unsigned short __b,
-                                           vector unsigned short __c) {
+static __inline__ vector short __ATTRS_o_ai vec_mladd(
+    vector short __a, vector unsigned short __b, vector unsigned short __c) {
   return __a * (vector short)__b + (vector short)__c;
 }
 
-static vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
-                                           vector short __b, vector short __c) {
+static __inline__ vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
+                                                      vector short __b,
+                                                      vector short __c) {
   return (vector short)__a * __b + __c;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
-                                                    vector unsigned short __b,
-                                                    vector unsigned short __c) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mladd(vector unsigned short __a, vector unsigned short __b,
+          vector unsigned short __c) {
   return __a * __b + __c;
 }
 
 /* vec_vmladduhm */
 
-static vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
-                                               vector short __b,
-                                               vector short __c) {
+static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
+                                                          vector short __b,
+                                                          vector short __c) {
   return __a * __b + __c;
 }
 
-static vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
-                                               vector unsigned short __b,
-                                               vector unsigned short __c) {
+static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(
+    vector short __a, vector unsigned short __b, vector unsigned short __c) {
   return __a * (vector short)__b + (vector short)__c;
 }
 
-static vector short __ATTRS_o_ai vec_vmladduhm(vector unsigned short __a,
-                                               vector short __b,
-                                               vector short __c) {
+static __inline__ vector short __ATTRS_o_ai
+vec_vmladduhm(vector unsigned short __a, vector short __b, vector short __c) {
   return (vector short)__a * __b + __c;
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vmladduhm(vector unsigned short __a, vector unsigned short __b,
               vector unsigned short __c) {
   return __a * __b + __c;
@@ -4063,53 +4176,54 @@
 
 /* vec_mradds */
 
-static vector short __attribute__((__always_inline__))
+static __inline__ vector short __attribute__((__always_inline__))
 vec_mradds(vector short __a, vector short __b, vector short __c) {
   return __builtin_altivec_vmhraddshs(__a, __b, __c);
 }
 
 /* vec_vmhraddshs */
 
-static vector short __attribute__((__always_inline__))
+static __inline__ vector short __attribute__((__always_inline__))
 vec_vmhraddshs(vector short __a, vector short __b, vector short __c) {
   return __builtin_altivec_vmhraddshs(__a, __b, __c);
 }
 
 /* vec_msum */
 
-static vector int __ATTRS_o_ai vec_msum(vector signed char __a,
-                                        vector unsigned char __b,
-                                        vector int __c) {
+static __inline__ vector int __ATTRS_o_ai vec_msum(vector signed char __a,
+                                                   vector unsigned char __b,
+                                                   vector int __c) {
   return __builtin_altivec_vmsummbm(__a, __b, __c);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_msum(vector unsigned char __a,
-                                                 vector unsigned char __b,
-                                                 vector unsigned int __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_msum(vector unsigned char __a, vector unsigned char __b,
+         vector unsigned int __c) {
   return __builtin_altivec_vmsumubm(__a, __b, __c);
 }
 
-static vector int __ATTRS_o_ai vec_msum(vector short __a, vector short __b,
-                                        vector int __c) {
+static __inline__ vector int __ATTRS_o_ai vec_msum(vector short __a,
+                                                   vector short __b,
+                                                   vector int __c) {
   return __builtin_altivec_vmsumshm(__a, __b, __c);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_msum(vector unsigned short __a,
-                                                 vector unsigned short __b,
-                                                 vector unsigned int __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_msum(vector unsigned short __a, vector unsigned short __b,
+         vector unsigned int __c) {
   return __builtin_altivec_vmsumuhm(__a, __b, __c);
 }
 
 /* vec_vmsummbm */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_vmsummbm(vector signed char __a, vector unsigned char __b, vector int __c) {
   return __builtin_altivec_vmsummbm(__a, __b, __c);
 }
 
 /* vec_vmsumubm */
 
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vmsumubm(vector unsigned char __a, vector unsigned char __b,
              vector unsigned int __c) {
   return __builtin_altivec_vmsumubm(__a, __b, __c);
@@ -4117,14 +4231,14 @@
 
 /* vec_vmsumshm */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_vmsumshm(vector short __a, vector short __b, vector int __c) {
   return __builtin_altivec_vmsumshm(__a, __b, __c);
 }
 
 /* vec_vmsumuhm */
 
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vmsumuhm(vector unsigned short __a, vector unsigned short __b,
              vector unsigned int __c) {
   return __builtin_altivec_vmsumuhm(__a, __b, __c);
@@ -4132,27 +4246,28 @@
 
 /* vec_msums */
 
-static vector int __ATTRS_o_ai vec_msums(vector short __a, vector short __b,
-                                         vector int __c) {
+static __inline__ vector int __ATTRS_o_ai vec_msums(vector short __a,
+                                                    vector short __b,
+                                                    vector int __c) {
   return __builtin_altivec_vmsumshs(__a, __b, __c);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_msums(vector unsigned short __a,
-                                                  vector unsigned short __b,
-                                                  vector unsigned int __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_msums(vector unsigned short __a, vector unsigned short __b,
+          vector unsigned int __c) {
   return __builtin_altivec_vmsumuhs(__a, __b, __c);
 }
 
 /* vec_vmsumshs */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_vmsumshs(vector short __a, vector short __b, vector int __c) {
   return __builtin_altivec_vmsumshs(__a, __b, __c);
 }
 
 /* vec_vmsumuhs */
 
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vmsumuhs(vector unsigned short __a, vector unsigned short __b,
              vector unsigned int __c) {
   return __builtin_altivec_vmsumuhs(__a, __b, __c);
@@ -4160,47 +4275,47 @@
 
 /* vec_mtvscr */
 
-static void __ATTRS_o_ai vec_mtvscr(vector signed char __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector signed char __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
-static void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
-static void __ATTRS_o_ai vec_mtvscr(vector bool char __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool char __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
-static void __ATTRS_o_ai vec_mtvscr(vector short __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector short __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
-static void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
-static void __ATTRS_o_ai vec_mtvscr(vector bool short __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool short __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
-static void __ATTRS_o_ai vec_mtvscr(vector pixel __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector pixel __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
-static void __ATTRS_o_ai vec_mtvscr(vector int __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector int __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
-static void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
-static void __ATTRS_o_ai vec_mtvscr(vector bool int __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool int __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
-static void __ATTRS_o_ai vec_mtvscr(vector float __a) {
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector float __a) {
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
@@ -4210,55 +4325,56 @@
    elements separately, then truncating the results and moving to the
    result vector.
 */
-static vector signed char __ATTRS_o_ai vec_mul(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_mul(vector signed char __a, vector signed char __b) {
   return __a * __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_mul(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_mul(vector unsigned char __a, vector unsigned char __b) {
   return __a * __b;
 }
 
-static vector signed short __ATTRS_o_ai vec_mul(vector signed short __a,
-                                                vector signed short __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_mul(vector signed short __a, vector signed short __b) {
   return __a * __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_mul(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mul(vector unsigned short __a, vector unsigned short __b) {
   return __a * __b;
 }
 
-static vector signed int __ATTRS_o_ai vec_mul(vector signed int __a,
-                                              vector signed int __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_mul(vector signed int __a, vector signed int __b) {
   return __a * __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_mul(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mul(vector unsigned int __a, vector unsigned int __b) {
   return __a * __b;
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_mul(vector signed long long __a, vector signed long long __b) {
   return __a * __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_mul(vector unsigned long long __a, vector unsigned long long __b) {
   return __a * __b;
 }
 #endif
 
-static vector float __ATTRS_o_ai vec_mul(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_mul(vector float __a,
+                                                    vector float __b) {
   return __a * __b;
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai
-vec_mul(vector double __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_mul(vector double __a,
+                                                     vector double __b) {
   return __a * __b;
 }
 #endif
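
As a minimal usage sketch of the element-wise semantics described in the comment above these vec_mul overloads (assuming a PowerPC target compiled with clang or GCC and -maltivec; the snippet is illustrative only and is not part of altivec.h or of this patch):

#include <altivec.h>
#include <stdio.h>

int main(void) {
  /* Two four-lane single-precision vectors. */
  vector float a = {1.0f, 2.0f, 3.0f, 4.0f};
  vector float b = {4.0f, 3.0f, 2.0f, 1.0f};

  /* vec_mul multiplies corresponding lanes: {4, 6, 6, 4}. */
  vector float prod = vec_mul(a, b);

  /* vec_min takes the lane-wise minimum: {1, 2, 2, 1}. */
  vector float m = vec_min(a, b);

  /* Copy the vectors into plain arrays to print them portably. */
  union { vector float v; float f[4]; } p = {prod}, q = {m};
  for (int i = 0; i < 4; ++i)
    printf("prod[%d] = %f  min[%d] = %f\n", i, p.f[i], i, q.f[i]);
  return 0;
}
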
@@ -4268,8 +4384,8 @@
 
 /* vec_mule */
 
-static vector short __ATTRS_o_ai vec_mule(vector signed char __a,
-                                          vector signed char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_mule(vector signed char __a,
+                                                     vector signed char __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulosb(__a, __b);
 #else
@@ -4277,8 +4393,8 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_mule(vector unsigned char __a,
-                                                   vector unsigned char __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mule(vector unsigned char __a, vector unsigned char __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmuloub(__a, __b);
 #else
@@ -4286,7 +4402,8 @@
 #endif
 }
 
-static vector int __ATTRS_o_ai vec_mule(vector short __a, vector short __b) {
+static __inline__ vector int __ATTRS_o_ai vec_mule(vector short __a,
+                                                   vector short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulosh(__a, __b);
 #else
@@ -4294,8 +4411,8 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai vec_mule(vector unsigned short __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mule(vector unsigned short __a, vector unsigned short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulouh(__a, __b);
 #else
@@ -4304,8 +4421,8 @@
 }
 
 #ifdef __POWER8_VECTOR__
-static vector signed long long __ATTRS_o_ai vec_mule(vector signed int __a,
-                                                     vector signed int __b) {
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mule(vector signed int __a, vector signed int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulosw(__a, __b);
 #else
@@ -4313,7 +4430,7 @@
 #endif
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_mule(vector unsigned int __a, vector unsigned int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulouw(__a, __b);
@@ -4325,7 +4442,7 @@
 
 /* vec_vmulesb */
 
-static vector short __attribute__((__always_inline__))
+static __inline__ vector short __attribute__((__always_inline__))
 vec_vmulesb(vector signed char __a, vector signed char __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulosb(__a, __b);
@@ -4336,7 +4453,7 @@
 
 /* vec_vmuleub */
 
-static vector unsigned short __attribute__((__always_inline__))
+static __inline__ vector unsigned short __attribute__((__always_inline__))
 vec_vmuleub(vector unsigned char __a, vector unsigned char __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmuloub(__a, __b);
@@ -4347,7 +4464,7 @@
 
 /* vec_vmulesh */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_vmulesh(vector short __a, vector short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulosh(__a, __b);
@@ -4358,7 +4475,7 @@
 
 /* vec_vmuleuh */
 
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulouh(__a, __b);
@@ -4369,8 +4486,8 @@
 
 /* vec_mulo */
 
-static vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
-                                          vector signed char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
+                                                     vector signed char __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulesb(__a, __b);
 #else
@@ -4378,8 +4495,8 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_mulo(vector unsigned char __a,
-                                                   vector unsigned char __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mulo(vector unsigned char __a, vector unsigned char __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmuleub(__a, __b);
 #else
@@ -4387,7 +4504,8 @@
 #endif
 }
 
-static vector int __ATTRS_o_ai vec_mulo(vector short __a, vector short __b) {
+static __inline__ vector int __ATTRS_o_ai vec_mulo(vector short __a,
+                                                   vector short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulesh(__a, __b);
 #else
@@ -4395,8 +4513,8 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai vec_mulo(vector unsigned short __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mulo(vector unsigned short __a, vector unsigned short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmuleuh(__a, __b);
 #else
@@ -4405,8 +4523,8 @@
 }
 
 #ifdef __POWER8_VECTOR__
-static vector signed long long __ATTRS_o_ai vec_mulo(vector signed int __a,
-                                                     vector signed int __b) {
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mulo(vector signed int __a, vector signed int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulesw(__a, __b);
 #else
@@ -4414,7 +4532,7 @@
 #endif
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_mulo(vector unsigned int __a, vector unsigned int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmuleuw(__a, __b);
@@ -4426,7 +4544,7 @@
 
 /* vec_vmulosb */
 
-static vector short __attribute__((__always_inline__))
+static __inline__ vector short __attribute__((__always_inline__))
 vec_vmulosb(vector signed char __a, vector signed char __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulesb(__a, __b);
@@ -4437,7 +4555,7 @@
 
 /* vec_vmuloub */
 
-static vector unsigned short __attribute__((__always_inline__))
+static __inline__ vector unsigned short __attribute__((__always_inline__))
 vec_vmuloub(vector unsigned char __a, vector unsigned char __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmuleub(__a, __b);
@@ -4448,7 +4566,7 @@
 
 /* vec_vmulosh */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_vmulosh(vector short __a, vector short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmulesh(__a, __b);
@@ -4459,7 +4577,7 @@
 
 /* vec_vmulouh */
 
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vmulouh(vector unsigned short __a, vector unsigned short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vmuleuh(__a, __b);
@@ -4471,140 +4589,137 @@
 /*  vec_nand */
 
 #ifdef __POWER8_VECTOR__
-static vector signed char __ATTRS_o_ai vec_nand(vector signed char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_nand(vector signed char __a, vector signed char __b) {
   return ~(__a & __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_nand(vector signed char __a,
-                                                vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_nand(vector signed char __a, vector bool char __b) {
   return ~(__a & __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_nand(vector bool char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_nand(vector bool char __a, vector signed char __b) {
   return ~(__a & __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_nand(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_nand(vector unsigned char __a, vector unsigned char __b) {
   return ~(__a & __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_nand(vector unsigned char __a,
-                                                  vector bool char __b) {
-  return ~(__a & __b);
-
-}
-
-static vector unsigned char __ATTRS_o_ai vec_nand(vector bool char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_nand(vector unsigned char __a, vector bool char __b) {
   return ~(__a & __b);
 }
 
-static vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
-                                              vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_nand(vector bool char __a, vector unsigned char __b) {
   return ~(__a & __b);
 }
 
-static vector signed short __ATTRS_o_ai vec_nand(vector signed short __a,
-                                                 vector signed short __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
+                                                         vector bool char __b) {
   return ~(__a & __b);
 }
 
-static vector signed short __ATTRS_o_ai vec_nand(vector signed short __a,
-                                                 vector bool short __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_nand(vector signed short __a, vector signed short __b) {
   return ~(__a & __b);
 }
 
-static vector signed short __ATTRS_o_ai vec_nand(vector bool short __a,
-                                                 vector signed short __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_nand(vector signed short __a, vector bool short __b) {
   return ~(__a & __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_nand(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_nand(vector bool short __a, vector signed short __b) {
   return ~(__a & __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_nand(vector unsigned short __a,
-                                                   vector bool short __b) {
-  return ~(__a & __b);
-
-}
-
-static vector bool short __ATTRS_o_ai vec_nand(vector bool short __a,
-                                               vector bool short __b) {
-  return ~(__a & __b);
-
-}
-
-static vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
-                                               vector signed int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_nand(vector unsigned short __a, vector unsigned short __b) {
   return ~(__a & __b);
 }
 
-static vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
-                                               vector bool int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_nand(vector unsigned short __a, vector bool short __b) {
   return ~(__a & __b);
 }
 
-static vector signed int __ATTRS_o_ai vec_nand(vector bool int __a,
-                                               vector signed int __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_nand(vector bool short __a, vector bool short __b) {
   return ~(__a & __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_nand(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_nand(vector signed int __a, vector signed int __b) {
   return ~(__a & __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_nand(vector unsigned int __a,
-                                                 vector bool int __b) {
+static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
+                                                          vector bool int __b) {
   return ~(__a & __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_nand(vector bool int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_nand(vector bool int __a, vector signed int __b) {
   return ~(__a & __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
-                                             vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_nand(vector unsigned int __a, vector unsigned int __b) {
   return ~(__a & __b);
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_nand(vector unsigned int __a, vector bool int __b) {
+  return ~(__a & __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_nand(vector bool int __a, vector unsigned int __b) {
+  return ~(__a & __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
+                                                        vector bool int __b) {
+  return ~(__a & __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_nand(vector signed long long __a, vector signed long long __b) {
   return ~(__a & __b);
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_nand(vector signed long long __a, vector bool long long __b) {
   return ~(__a & __b);
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_nand(vector bool long long __a, vector signed long long __b) {
   return ~(__a & __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
   return ~(__a & __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_nand(vector unsigned long long __a, vector bool long long __b) {
   return ~(__a & __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_nand(vector bool long long __a, vector unsigned long long __b) {
   return ~(__a & __b);
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_nand(vector bool long long __a, vector bool long long __b) {
   return ~(__a & __b);
 }
@@ -4614,21 +4729,24 @@
 /* vec_nmadd */
 
 #ifdef __VSX__
-static vector float __ATTRS_o_ai
-vec_nmadd(vector float __a, vector float __b, vector float __c) {
+static __inline__ vector float __ATTRS_o_ai vec_nmadd(vector float __a,
+                                                      vector float __b,
+                                                      vector float __c) {
   return __builtin_vsx_xvnmaddasp(__a, __b, __c);
 }
 
-static vector double __ATTRS_o_ai
-vec_nmadd(vector double __a, vector double __b, vector double __c) {
+static __inline__ vector double __ATTRS_o_ai vec_nmadd(vector double __a,
+                                                       vector double __b,
+                                                       vector double __c) {
   return __builtin_vsx_xvnmaddadp(__a, __b, __c);
 }
 #endif
 
 /* vec_nmsub */
 
-static vector float __ATTRS_o_ai
-vec_nmsub(vector float __a, vector float __b, vector float __c) {
+static __inline__ vector float __ATTRS_o_ai vec_nmsub(vector float __a,
+                                                      vector float __b,
+                                                      vector float __c) {
 #ifdef __VSX__
   return __builtin_vsx_xvnmsubasp(__a, __b, __c);
 #else
@@ -4637,15 +4755,16 @@
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai
-vec_nmsub(vector double __a, vector double __b, vector double __c) {
+static __inline__ vector double __ATTRS_o_ai vec_nmsub(vector double __a,
+                                                       vector double __b,
+                                                       vector double __c) {
   return __builtin_vsx_xvnmsubadp(__a, __b, __c);
 }
 #endif
 
 /* vec_vnmsubfp */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vnmsubfp(vector float __a, vector float __b, vector float __c) {
   return __builtin_altivec_vnmsubfp(__a, __b, __c);
 }
@@ -4654,58 +4773,61 @@
 
 #define __builtin_altivec_vnor vec_nor
 
-static vector signed char __ATTRS_o_ai vec_nor(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_nor(vector signed char __a, vector signed char __b) {
   return ~(__a | __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_nor(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_nor(vector unsigned char __a, vector unsigned char __b) {
   return ~(__a | __b);
 }
 
-static vector bool char __ATTRS_o_ai vec_nor(vector bool char __a,
-                                             vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_nor(vector bool char __a,
+                                                        vector bool char __b) {
   return ~(__a | __b);
 }
 
-static vector short __ATTRS_o_ai vec_nor(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_nor(vector short __a,
+                                                    vector short __b) {
   return ~(__a | __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_nor(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_nor(vector unsigned short __a, vector unsigned short __b) {
   return ~(__a | __b);
 }
 
-static vector bool short __ATTRS_o_ai vec_nor(vector bool short __a,
-                                              vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_nor(vector bool short __a, vector bool short __b) {
   return ~(__a | __b);
 }
 
-static vector int __ATTRS_o_ai vec_nor(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_nor(vector int __a,
+                                                  vector int __b) {
   return ~(__a | __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_nor(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_nor(vector unsigned int __a, vector unsigned int __b) {
   return ~(__a | __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_nor(vector bool int __a,
-                                            vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_nor(vector bool int __a,
+                                                       vector bool int __b) {
   return ~(__a | __b);
 }
 
-static vector float __ATTRS_o_ai vec_nor(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_nor(vector float __a,
+                                                    vector float __b) {
   vector unsigned int __res =
       ~((vector unsigned int)__a | (vector unsigned int)__b);
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai
-vec_nor(vector double __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_nor(vector double __a,
+                                                     vector double __b) {
   vector unsigned long long __res =
       ~((vector unsigned long long)__a | (vector unsigned long long)__b);
   return (vector double)__res;
@@ -4714,68 +4836,71 @@
 
 /* vec_vnor */
 
-static vector signed char __ATTRS_o_ai vec_vnor(vector signed char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vnor(vector signed char __a, vector signed char __b) {
   return ~(__a | __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vnor(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vnor(vector unsigned char __a, vector unsigned char __b) {
   return ~(__a | __b);
 }
 
-static vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a,
-                                              vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a,
+                                                         vector bool char __b) {
   return ~(__a | __b);
 }
 
-static vector short __ATTRS_o_ai vec_vnor(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vnor(vector short __a,
+                                                     vector short __b) {
   return ~(__a | __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vnor(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vnor(vector unsigned short __a, vector unsigned short __b) {
   return ~(__a | __b);
 }
 
-static vector bool short __ATTRS_o_ai vec_vnor(vector bool short __a,
-                                               vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vnor(vector bool short __a, vector bool short __b) {
   return ~(__a | __b);
 }
 
-static vector int __ATTRS_o_ai vec_vnor(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vnor(vector int __a,
+                                                   vector int __b) {
   return ~(__a | __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vnor(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vnor(vector unsigned int __a, vector unsigned int __b) {
   return ~(__a | __b);
 }
 
-static vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a,
-                                             vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a,
+                                                        vector bool int __b) {
   return ~(__a | __b);
 }
 
-static vector float __ATTRS_o_ai vec_vnor(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vnor(vector float __a,
+                                                     vector float __b) {
   vector unsigned int __res =
       ~((vector unsigned int)__a | (vector unsigned int)__b);
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_nor(vector signed long long __a, vector signed long long __b) {
   return ~(__a | __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
   return ~(__a | __b);
 }
 
-static vector bool long long __ATTRS_o_ai vec_nor(vector bool long long __a,
-                                                  vector bool long long __b) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_nor(vector bool long long __a, vector bool long long __b) {
   return ~(__a | __b);
 }
 #endif
@@ -4784,315 +4909,323 @@
 
 #define __builtin_altivec_vor vec_or
 
-static vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
-                                              vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_or(vector signed char __a, vector signed char __b) {
   return __a | __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_or(vector bool char __a,
-                                              vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_or(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a | __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
-                                              vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
+                                                         vector bool char __b) {
   return __a | (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_or(vector unsigned char __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_or(vector unsigned char __a, vector unsigned char __b) {
   return __a | __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_or(vector bool char __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_or(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a | __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_or(vector unsigned char __a,
-                                                vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_or(vector unsigned char __a, vector bool char __b) {
   return __a | (vector unsigned char)__b;
 }
 
-static vector bool char __ATTRS_o_ai vec_or(vector bool char __a,
-                                            vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_or(vector bool char __a,
+                                                       vector bool char __b) {
   return __a | __b;
 }
 
-static vector short __ATTRS_o_ai vec_or(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
+                                                   vector short __b) {
   return __a | __b;
 }
 
-static vector short __ATTRS_o_ai vec_or(vector bool short __a,
-                                        vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_or(vector bool short __a,
+                                                   vector short __b) {
   return (vector short)__a | __b;
 }
 
-static vector short __ATTRS_o_ai vec_or(vector short __a,
-                                        vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
+                                                   vector bool short __b) {
   return __a | (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_or(vector unsigned short __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_or(vector unsigned short __a, vector unsigned short __b) {
   return __a | __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_or(vector bool short __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_or(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a | __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_or(vector unsigned short __a,
-                                                 vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_or(vector unsigned short __a, vector bool short __b) {
   return __a | (vector unsigned short)__b;
 }
 
-static vector bool short __ATTRS_o_ai vec_or(vector bool short __a,
-                                             vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai vec_or(vector bool short __a,
+                                                        vector bool short __b) {
   return __a | __b;
 }
 
-static vector int __ATTRS_o_ai vec_or(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
+                                                 vector int __b) {
   return __a | __b;
 }
 
-static vector int __ATTRS_o_ai vec_or(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_or(vector bool int __a,
+                                                 vector int __b) {
   return (vector int)__a | __b;
 }
 
-static vector int __ATTRS_o_ai vec_or(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
+                                                 vector bool int __b) {
   return __a | (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_or(vector unsigned int __a,
-                                               vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_or(vector unsigned int __a, vector unsigned int __b) {
   return __a | __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_or(vector bool int __a,
-                                               vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_or(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a | __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_or(vector unsigned int __a,
-                                               vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_or(vector unsigned int __a, vector bool int __b) {
   return __a | (vector unsigned int)__b;
 }
 
-static vector bool int __ATTRS_o_ai vec_or(vector bool int __a,
-                                           vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_or(vector bool int __a,
+                                                      vector bool int __b) {
   return __a | __b;
 }
 
-static vector float __ATTRS_o_ai vec_or(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
+                                                   vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a | (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_or(vector bool int __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_or(vector bool int __a,
+                                                   vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a | (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_or(vector float __a, vector bool int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
+                                                   vector bool int __b) {
   vector unsigned int __res =
       (vector unsigned int)__a | (vector unsigned int)__b;
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_or(vector bool long long __a,
-                                         vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_or(vector bool long long __a,
+                                                    vector double __b) {
   return (vector unsigned long long)__a | (vector unsigned long long)__b;
 }
 
-static vector double __ATTRS_o_ai vec_or(vector double __a,
-                                         vector bool long long __b) {
+static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
+                                                    vector bool long long __b) {
   return (vector unsigned long long)__a | (vector unsigned long long)__b;
 }
 
-static vector double __ATTRS_o_ai vec_or(vector double __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
+                                                    vector double __b) {
   vector unsigned long long __res =
       (vector unsigned long long)__a | (vector unsigned long long)__b;
   return (vector double)__res;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_or(vector signed long long __a, vector signed long long __b) {
   return __a | __b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_or(vector bool long long __a, vector signed long long __b) {
   return (vector signed long long)__a | __b;
 }
 
-static vector signed long long __ATTRS_o_ai vec_or(vector signed long long __a,
-                                                   vector bool long long __b) {
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_or(vector signed long long __a, vector bool long long __b) {
   return __a | (vector signed long long)__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_or(vector unsigned long long __a, vector unsigned long long __b) {
   return __a | __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_or(vector bool long long __a, vector unsigned long long __b) {
   return (vector unsigned long long)__a | __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_or(vector unsigned long long __a, vector bool long long __b) {
   return __a | (vector unsigned long long)__b;
 }
 
-static vector bool long long __ATTRS_o_ai vec_or(vector bool long long __a,
-                                                 vector bool long long __b) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_or(vector bool long long __a, vector bool long long __b) {
   return __a | __b;
 }
 #endif
 
 #ifdef __POWER8_VECTOR__
-static vector signed char __ATTRS_o_ai vec_orc(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_orc(vector signed char __a, vector signed char __b) {
   return __a | ~__b;
 }
 
-static vector signed char __ATTRS_o_ai vec_orc(vector signed char __a,
-                                               vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_orc(vector signed char __a, vector bool char __b) {
   return __a | ~__b;
 }
 
-static vector signed char __ATTRS_o_ai vec_orc(vector bool char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_orc(vector bool char __a, vector signed char __b) {
   return __a | ~__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_orc(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_orc(vector unsigned char __a, vector unsigned char __b) {
   return __a | ~__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_orc(vector unsigned char __a,
-                                                 vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_orc(vector unsigned char __a, vector bool char __b) {
   return __a | ~__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_orc(vector bool char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_orc(vector bool char __a, vector unsigned char __b) {
   return __a | ~__b;
 }
 
-static vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
-                                             vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
+                                                        vector bool char __b) {
   return __a | ~__b;
 }
 
-static vector signed short __ATTRS_o_ai vec_orc(vector signed short __a,
-                                                vector signed short __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_orc(vector signed short __a, vector signed short __b) {
   return __a | ~__b;
 }
 
-static vector signed short __ATTRS_o_ai vec_orc(vector signed short __a,
-                                                vector bool short __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_orc(vector signed short __a, vector bool short __b) {
   return __a | ~__b;
 }
 
-static vector signed short __ATTRS_o_ai vec_orc(vector bool short __a,
-                                                vector signed short __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_orc(vector bool short __a, vector signed short __b) {
   return __a | ~__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_orc(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_orc(vector unsigned short __a, vector unsigned short __b) {
   return __a | ~__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_orc(vector unsigned short __a,
-                                                  vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_orc(vector unsigned short __a, vector bool short __b) {
   return __a | ~__b;
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_orc(vector bool short __a, vector unsigned short __b) {
   return __a | ~__b;
 }
 
-static vector bool short __ATTRS_o_ai vec_orc(vector bool short __a,
-                                              vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_orc(vector bool short __a, vector bool short __b) {
   return __a | ~__b;
 }
 
-static vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
-                                              vector signed int __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_orc(vector signed int __a, vector signed int __b) {
   return __a | ~__b;
 }
 
-static vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
-                                              vector bool int __b) {
+static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
+                                                         vector bool int __b) {
   return __a | ~__b;
 }
 
-static vector signed int __ATTRS_o_ai vec_orc(vector bool int __a,
-                                              vector signed int __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_orc(vector bool int __a, vector signed int __b) {
   return __a | ~__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_orc(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_orc(vector unsigned int __a, vector unsigned int __b) {
   return __a | ~__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_orc(vector unsigned int __a,
-                                                vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_orc(vector unsigned int __a, vector bool int __b) {
   return __a | ~__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_orc(vector bool int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_orc(vector bool int __a, vector unsigned int __b) {
   return __a | ~__b;
 }
 
-static vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
-                                            vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
+                                                       vector bool int __b) {
   return __a | ~__b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_orc(vector signed long long __a, vector signed long long __b) {
   return __a | ~__b;
 }
 
-static vector signed long long __ATTRS_o_ai vec_orc(vector signed long long __a,
-                                                    vector bool long long __b) {
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_orc(vector signed long long __a, vector bool long long __b) {
   return __a | ~__b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_orc(vector bool long long __a, vector signed long long __b) {
   return __a | ~__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
   return __a | ~__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_orc(vector unsigned long long __a, vector bool long long __b) {
   return __a | ~__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_orc(vector bool long long __a, vector unsigned long long __b) {
   return __a | ~__b;
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_orc(vector bool long long __a, vector bool long long __b) {
   return __a | ~__b;
 }
@@ -5100,160 +5233,165 @@
 
 /* vec_vor */
 
-static vector signed char __ATTRS_o_ai vec_vor(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vor(vector signed char __a, vector signed char __b) {
   return __a | __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vor(vector bool char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vor(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a | __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vor(vector signed char __a,
-                                               vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vor(vector signed char __a, vector bool char __b) {
   return __a | (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vor(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vor(vector unsigned char __a, vector unsigned char __b) {
   return __a | __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vor(vector bool char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vor(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a | __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vor(vector unsigned char __a,
-                                                 vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vor(vector unsigned char __a, vector bool char __b) {
   return __a | (vector unsigned char)__b;
 }
 
-static vector bool char __ATTRS_o_ai vec_vor(vector bool char __a,
-                                             vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_vor(vector bool char __a,
+                                                        vector bool char __b) {
   return __a | __b;
 }
 
-static vector short __ATTRS_o_ai vec_vor(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
+                                                    vector short __b) {
   return __a | __b;
 }
 
-static vector short __ATTRS_o_ai vec_vor(vector bool short __a,
-                                         vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vor(vector bool short __a,
+                                                    vector short __b) {
   return (vector short)__a | __b;
 }
 
-static vector short __ATTRS_o_ai vec_vor(vector short __a,
-                                         vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
+                                                    vector bool short __b) {
   return __a | (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vor(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vor(vector unsigned short __a, vector unsigned short __b) {
   return __a | __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vor(vector bool short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vor(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a | __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vor(vector unsigned short __a,
-                                                  vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vor(vector unsigned short __a, vector bool short __b) {
   return __a | (vector unsigned short)__b;
 }
 
-static vector bool short __ATTRS_o_ai vec_vor(vector bool short __a,
-                                              vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vor(vector bool short __a, vector bool short __b) {
   return __a | __b;
 }
 
-static vector int __ATTRS_o_ai vec_vor(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
+                                                  vector int __b) {
   return __a | __b;
 }
 
-static vector int __ATTRS_o_ai vec_vor(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vor(vector bool int __a,
+                                                  vector int __b) {
   return (vector int)__a | __b;
 }
 
-static vector int __ATTRS_o_ai vec_vor(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
+                                                  vector bool int __b) {
   return __a | (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vor(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vor(vector unsigned int __a, vector unsigned int __b) {
   return __a | __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vor(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a | __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vor(vector unsigned int __a,
-                                                vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vor(vector unsigned int __a, vector bool int __b) {
   return __a | (vector unsigned int)__b;
 }
 
-static vector bool int __ATTRS_o_ai vec_vor(vector bool int __a,
-                                            vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_vor(vector bool int __a,
+                                                       vector bool int __b) {
   return __a | __b;
 }
 
-static vector float __ATTRS_o_ai vec_vor(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
+                                                    vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a | (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_vor(vector bool int __a,
-                                         vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vor(vector bool int __a,
+                                                    vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a | (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_vor(vector float __a,
-                                         vector bool int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
+                                                    vector bool int __b) {
   vector unsigned int __res =
       (vector unsigned int)__a | (vector unsigned int)__b;
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vor(vector signed long long __a, vector signed long long __b) {
   return __a | __b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vor(vector bool long long __a, vector signed long long __b) {
   return (vector signed long long)__a | __b;
 }
 
-static vector signed long long __ATTRS_o_ai vec_vor(vector signed long long __a,
-                                                    vector bool long long __b) {
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vor(vector signed long long __a, vector bool long long __b) {
   return __a | (vector signed long long)__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vor(vector unsigned long long __a, vector unsigned long long __b) {
   return __a | __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vor(vector bool long long __a, vector unsigned long long __b) {
   return (vector unsigned long long)__a | __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vor(vector unsigned long long __a, vector bool long long __b) {
   return __a | (vector unsigned long long)__b;
 }
 
-static vector bool long long __ATTRS_o_ai vec_vor(vector bool long long __a,
-                                                  vector bool long long __b) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vor(vector bool long long __a, vector bool long long __b) {
   return __a | __b;
 }
 #endif
@@ -5263,8 +5401,8 @@
 /* The various vector pack instructions have a big-endian bias, so for
    little endian we must handle reversed element numbering.  */
 
-static vector signed char __ATTRS_o_ai vec_pack(vector signed short __a,
-                                                vector signed short __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_pack(vector signed short __a, vector signed short __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector signed char)vec_perm(
       __a, __b,
@@ -5278,8 +5416,8 @@
 #endif
 }
 
-static vector unsigned char __ATTRS_o_ai vec_pack(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_pack(vector unsigned short __a, vector unsigned short __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector unsigned char)vec_perm(
       __a, __b,
@@ -5293,8 +5431,8 @@
 #endif
 }
 
-static vector bool char __ATTRS_o_ai vec_pack(vector bool short __a,
-                                              vector bool short __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_pack(vector bool short __a, vector bool short __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool char)vec_perm(
       __a, __b,
@@ -5308,7 +5446,8 @@
 #endif
 }
 
-static vector short __ATTRS_o_ai vec_pack(vector int __a, vector int __b) {
+static __inline__ vector short __ATTRS_o_ai vec_pack(vector int __a,
+                                                     vector int __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector short)vec_perm(
       __a, __b,
@@ -5322,8 +5461,8 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_pack(vector unsigned int __a,
-                                                   vector unsigned int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_pack(vector unsigned int __a, vector unsigned int __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector unsigned short)vec_perm(
       __a, __b,
@@ -5337,8 +5476,8 @@
 #endif
 }
 
-static vector bool short __ATTRS_o_ai vec_pack(vector bool int __a,
-                                               vector bool int __b) {
+static __inline__ vector bool short __ATTRS_o_ai vec_pack(vector bool int __a,
+                                                          vector bool int __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool short)vec_perm(
       __a, __b,
@@ -5353,8 +5492,8 @@
 }
 
 #ifdef __VSX__
-static vector signed int __ATTRS_o_ai vec_pack(vector signed long long __a,
-                                               vector signed long long __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_pack(vector signed long long __a, vector signed long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector signed int)vec_perm(
       __a, __b,
@@ -5367,7 +5506,7 @@
                              0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
 #endif
 }
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector unsigned int)vec_perm(
@@ -5382,8 +5521,8 @@
 #endif
 }
 
-static vector bool int __ATTRS_o_ai vec_pack(vector bool long long __a,
-                                             vector bool long long __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_pack(vector bool long long __a, vector bool long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool int)vec_perm(
       __a, __b,
@@ -5403,8 +5542,8 @@
 
 #define __builtin_altivec_vpkuhum vec_vpkuhum
 
-static vector signed char __ATTRS_o_ai vec_vpkuhum(vector signed short __a,
-                                                   vector signed short __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vpkuhum(vector signed short __a, vector signed short __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector signed char)vec_perm(
       __a, __b,
@@ -5418,7 +5557,7 @@
 #endif
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 vec_vpkuhum(vector unsigned short __a, vector unsigned short __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector unsigned char)vec_perm(
@@ -5433,8 +5572,8 @@
 #endif
 }
 
-static vector bool char __ATTRS_o_ai vec_vpkuhum(vector bool short __a,
-                                                 vector bool short __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vpkuhum(vector bool short __a, vector bool short __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool char)vec_perm(
       __a, __b,
@@ -5452,7 +5591,8 @@
 
 #define __builtin_altivec_vpkuwum vec_vpkuwum
 
-static vector short __ATTRS_o_ai vec_vpkuwum(vector int __a, vector int __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vpkuwum(vector int __a,
+                                                        vector int __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector short)vec_perm(
       __a, __b,
@@ -5466,8 +5606,8 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vpkuwum(vector unsigned int __a,
-                                                      vector unsigned int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vpkuwum(vector unsigned int __a, vector unsigned int __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector unsigned short)vec_perm(
       __a, __b,
@@ -5481,8 +5621,8 @@
 #endif
 }
 
-static vector bool short __ATTRS_o_ai vec_vpkuwum(vector bool int __a,
-                                                  vector bool int __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vpkuwum(vector bool int __a, vector bool int __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool short)vec_perm(
       __a, __b,
@@ -5501,8 +5641,8 @@
 #ifdef __POWER8_VECTOR__
 #define __builtin_altivec_vpkudum vec_vpkudum
 
-static vector int __ATTRS_o_ai vec_vpkudum(vector long long __a,
-                                           vector long long __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vpkudum(vector long long __a,
+                                                      vector long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector int)vec_perm(
       __a, __b,
@@ -5516,7 +5656,7 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector unsigned int)vec_perm(
@@ -5531,8 +5671,8 @@
 #endif
 }
 
-static vector bool int __ATTRS_o_ai vec_vpkudum(vector bool long long __a,
-                                                vector bool long long __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vpkudum(vector bool long long __a, vector bool long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool int)vec_perm(
       (vector long long)__a, (vector long long)__b,
@@ -5549,7 +5689,7 @@
 
 /* vec_packpx */
 
-static vector pixel __attribute__((__always_inline__))
+static __inline__ vector pixel __attribute__((__always_inline__))
 vec_packpx(vector unsigned int __a, vector unsigned int __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
@@ -5560,7 +5700,7 @@
 
 /* vec_vpkpx */
 
-static vector pixel __attribute__((__always_inline__))
+static __inline__ vector pixel __attribute__((__always_inline__))
 vec_vpkpx(vector unsigned int __a, vector unsigned int __b) {
 #ifdef __LITTLE_ENDIAN__
   return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
@@ -5571,8 +5711,8 @@
 
 /* vec_packs */
 
-static vector signed char __ATTRS_o_ai vec_packs(vector short __a,
-                                                 vector short __b) {
+static __inline__ vector signed char __ATTRS_o_ai vec_packs(vector short __a,
+                                                            vector short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkshss(__b, __a);
 #else
@@ -5580,8 +5720,8 @@
 #endif
 }
 
-static vector unsigned char __ATTRS_o_ai vec_packs(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_packs(vector unsigned short __a, vector unsigned short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkuhus(__b, __a);
 #else
@@ -5589,8 +5729,8 @@
 #endif
 }
 
-static vector signed short __ATTRS_o_ai vec_packs(vector int __a,
-                                                  vector int __b) {
+static __inline__ vector signed short __ATTRS_o_ai vec_packs(vector int __a,
+                                                             vector int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkswss(__b, __a);
 #else
@@ -5598,8 +5738,8 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_packs(vector unsigned int __a,
-                                                    vector unsigned int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_packs(vector unsigned int __a, vector unsigned int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkuwus(__b, __a);
 #else
@@ -5608,8 +5748,8 @@
 }
 
 #ifdef __POWER8_VECTOR__
-static vector int __ATTRS_o_ai vec_packs(vector long long __a,
-                                         vector long long __b) {
+static __inline__ vector int __ATTRS_o_ai vec_packs(vector long long __a,
+                                                    vector long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpksdss(__b, __a);
 #else
@@ -5617,7 +5757,7 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkudus(__b, __a);
@@ -5629,7 +5769,7 @@
 
 /* vec_vpkshss */
 
-static vector signed char __attribute__((__always_inline__))
+static __inline__ vector signed char __attribute__((__always_inline__))
 vec_vpkshss(vector short __a, vector short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkshss(__b, __a);
@@ -5641,8 +5781,8 @@
 /* vec_vpksdss */
 
 #ifdef __POWER8_VECTOR__
-static vector int __ATTRS_o_ai vec_vpksdss(vector long long __a,
-                                           vector long long __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vpksdss(vector long long __a,
+                                                      vector long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpksdss(__b, __a);
 #else
@@ -5653,7 +5793,7 @@
 
 /* vec_vpkuhus */
 
-static vector unsigned char __attribute__((__always_inline__))
+static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkuhus(__b, __a);
@@ -5665,7 +5805,7 @@
 /* vec_vpkudus */
 
 #ifdef __POWER8_VECTOR__
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkudus(__b, __a);
@@ -5677,7 +5817,7 @@
 
 /* vec_vpkswss */
 
-static vector signed short __attribute__((__always_inline__))
+static __inline__ vector signed short __attribute__((__always_inline__))
 vec_vpkswss(vector int __a, vector int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkswss(__b, __a);
@@ -5688,7 +5828,7 @@
 
 /* vec_vpkuwus */
 
-static vector unsigned short __attribute__((__always_inline__))
+static __inline__ vector unsigned short __attribute__((__always_inline__))
 vec_vpkuwus(vector unsigned int __a, vector unsigned int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkuwus(__b, __a);
@@ -5699,8 +5839,8 @@
 
 /* vec_packsu */
 
-static vector unsigned char __ATTRS_o_ai vec_packsu(vector short __a,
-                                                    vector short __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_packsu(vector short __a, vector short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkshus(__b, __a);
 #else
@@ -5708,8 +5848,8 @@
 #endif
 }
 
-static vector unsigned char __ATTRS_o_ai vec_packsu(vector unsigned short __a,
-                                                    vector unsigned short __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_packsu(vector unsigned short __a, vector unsigned short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkuhus(__b, __a);
 #else
@@ -5717,8 +5857,8 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_packsu(vector int __a,
-                                                     vector int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_packsu(vector int __a, vector int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkswus(__b, __a);
 #else
@@ -5726,8 +5866,8 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_packsu(vector unsigned int __a,
-                                                     vector unsigned int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_packsu(vector unsigned int __a, vector unsigned int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkuwus(__b, __a);
 #else
@@ -5736,8 +5876,8 @@
 }
 
 #ifdef __POWER8_VECTOR__
-static vector unsigned int __ATTRS_o_ai vec_packsu(vector long long __a,
-                                                   vector long long __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_packsu(vector long long __a, vector long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpksdus(__b, __a);
 #else
@@ -5745,7 +5885,7 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkudus(__b, __a);
@@ -5757,8 +5897,8 @@
 
 /* vec_vpkshus */
 
-static vector unsigned char __ATTRS_o_ai vec_vpkshus(vector short __a,
-                                                     vector short __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vpkshus(vector short __a, vector short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkshus(__b, __a);
 #else
@@ -5766,7 +5906,7 @@
 #endif
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 vec_vpkshus(vector unsigned short __a, vector unsigned short __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkuhus(__b, __a);
@@ -5777,8 +5917,8 @@
 
 /* vec_vpkswus */
 
-static vector unsigned short __ATTRS_o_ai vec_vpkswus(vector int __a,
-                                                      vector int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vpkswus(vector int __a, vector int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkswus(__b, __a);
 #else
@@ -5786,8 +5926,8 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vpkswus(vector unsigned int __a,
-                                                      vector unsigned int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vpkswus(vector unsigned int __a, vector unsigned int __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpkuwus(__b, __a);
 #else
@@ -5798,8 +5938,8 @@
 /* vec_vpksdus */
 
 #ifdef __POWER8_VECTOR__
-static vector unsigned int __ATTRS_o_ai vec_vpksdus(vector long long __a,
-                                                    vector long long __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vpksdus(vector long long __a, vector long long __b) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vpksdus(__b, __a);
 #else
@@ -5818,9 +5958,8 @@
 // in that the vec_xor can be recognized as a vec_nor (and for P8 and
 // later, possibly a vec_nand).
 
-static vector signed char __ATTRS_o_ai vec_perm(vector signed char __a,
-                                                vector signed char __b,
-                                                vector unsigned char __c) {
+static __inline__ vector signed char __ATTRS_o_ai vec_perm(
+    vector signed char __a, vector signed char __b, vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5833,9 +5972,9 @@
 #endif
 }
 
-static vector unsigned char __ATTRS_o_ai vec_perm(vector unsigned char __a,
-                                                  vector unsigned char __b,
-                                                  vector unsigned char __c) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_perm(vector unsigned char __a, vector unsigned char __b,
+         vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5848,9 +5987,8 @@
 #endif
 }
 
-static vector bool char __ATTRS_o_ai vec_perm(vector bool char __a,
-                                              vector bool char __b,
-                                              vector unsigned char __c) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5863,9 +6001,9 @@
 #endif
 }
 
-static vector short __ATTRS_o_ai vec_perm(vector signed short __a,
-                                          vector signed short __b,
-                                          vector unsigned char __c) {
+static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
+                                                     vector signed short __b,
+                                                     vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5878,9 +6016,9 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_perm(vector unsigned short __a,
-                                                   vector unsigned short __b,
-                                                   vector unsigned char __c) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_perm(vector unsigned short __a, vector unsigned short __b,
+         vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5893,9 +6031,8 @@
 #endif
 }
 
-static vector bool short __ATTRS_o_ai vec_perm(vector bool short __a,
-                                               vector bool short __b,
-                                               vector unsigned char __c) {
+static __inline__ vector bool short __ATTRS_o_ai vec_perm(
+    vector bool short __a, vector bool short __b, vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5908,8 +6045,9 @@
 #endif
 }
 
-static vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, vector pixel __b,
-                                          vector unsigned char __c) {
+static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
+                                                     vector pixel __b,
+                                                     vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5922,9 +6060,9 @@
 #endif
 }
 
-static vector int __ATTRS_o_ai vec_perm(vector signed int __a,
-                                        vector signed int __b,
-                                        vector unsigned char __c) {
+static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
+                                                   vector signed int __b,
+                                                   vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5935,9 +6073,9 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai vec_perm(vector unsigned int __a,
-                                                 vector unsigned int __b,
-                                                 vector unsigned char __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_perm(vector unsigned int __a, vector unsigned int __b,
+         vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5950,9 +6088,8 @@
 #endif
 }
 
-static vector bool int __ATTRS_o_ai vec_perm(vector bool int __a,
-                                             vector bool int __b,
-                                             vector unsigned char __c) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5965,8 +6102,9 @@
 #endif
 }
 
-static vector float __ATTRS_o_ai vec_perm(vector float __a, vector float __b,
-                                          vector unsigned char __c) {
+static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
+                                                     vector float __b,
+                                                     vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5980,9 +6118,9 @@
 }
 
 #ifdef __VSX__
-static vector long long __ATTRS_o_ai vec_perm(vector signed long long __a,
-                                              vector signed long long __b,
-                                              vector unsigned char __c) {
+static __inline__ vector long long __ATTRS_o_ai
+vec_perm(vector signed long long __a, vector signed long long __b,
+         vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -5995,7 +6133,7 @@
 #endif
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_perm(vector unsigned long long __a, vector unsigned long long __b,
          vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
@@ -6010,7 +6148,7 @@
 #endif
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_perm(vector bool long long __a, vector bool long long __b,
          vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
@@ -6025,8 +6163,8 @@
 #endif
 }
 
-static vector double __ATTRS_o_ai vec_perm(vector double __a, vector double __b,
-                                           vector unsigned char __c) {
+static __inline__ vector double __ATTRS_o_ai
+vec_perm(vector double __a, vector double __b, vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
                               255, 255, 255, 255, 255, 255, 255, 255};
@@ -6042,92 +6180,86 @@
 
 /* vec_vperm */
 
-static vector signed char __ATTRS_o_ai vec_vperm(vector signed char __a,
-                                                 vector signed char __b,
-                                                 vector unsigned char __c) {
+static __inline__ vector signed char __ATTRS_o_ai vec_vperm(
+    vector signed char __a, vector signed char __b, vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vperm(vector unsigned char __a,
-                                                   vector unsigned char __b,
-                                                   vector unsigned char __c) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vperm(vector unsigned char __a, vector unsigned char __b,
+          vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 
-static vector bool char __ATTRS_o_ai vec_vperm(vector bool char __a,
-                                               vector bool char __b,
-                                               vector unsigned char __c) {
+static __inline__ vector bool char __ATTRS_o_ai vec_vperm(
+    vector bool char __a, vector bool char __b, vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 
-static vector short __ATTRS_o_ai vec_vperm(vector short __a, vector short __b,
-                                           vector unsigned char __c) {
+static __inline__ vector short __ATTRS_o_ai
+vec_vperm(vector short __a, vector short __b, vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vperm(vector unsigned short __a,
-                                                    vector unsigned short __b,
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vperm(vector unsigned short __a, vector unsigned short __b,
+          vector unsigned char __c) {
+  return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai vec_vperm(
+    vector bool short __a, vector bool short __b, vector unsigned char __c) {
+  return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai
+vec_vperm(vector pixel __a, vector pixel __b, vector unsigned char __c) {
+  return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vperm(vector int __a,
+                                                    vector int __b,
                                                     vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 
-static vector bool short __ATTRS_o_ai vec_vperm(vector bool short __a,
-                                                vector bool short __b,
-                                                vector unsigned char __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vperm(vector unsigned int __a, vector unsigned int __b,
+          vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 
-static vector pixel __ATTRS_o_ai vec_vperm(vector pixel __a, vector pixel __b,
-                                           vector unsigned char __c) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vperm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 
-static vector int __ATTRS_o_ai vec_vperm(vector int __a, vector int __b,
-                                         vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_vperm(vector unsigned int __a,
-                                                  vector unsigned int __b,
-                                                  vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static vector bool int __ATTRS_o_ai vec_vperm(vector bool int __a,
-                                              vector bool int __b,
-                                              vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static vector float __ATTRS_o_ai vec_vperm(vector float __a, vector float __b,
-                                           vector unsigned char __c) {
+static __inline__ vector float __ATTRS_o_ai
+vec_vperm(vector float __a, vector float __b, vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 
 #ifdef __VSX__
-static vector long long __ATTRS_o_ai vec_vperm(vector long long __a,
-                                               vector long long __b,
-                                               vector unsigned char __c) {
+static __inline__ vector long long __ATTRS_o_ai vec_vperm(
+    vector long long __a, vector long long __b, vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vperm(vector unsigned long long __a, vector unsigned long long __b,
           vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 
-static vector double __ATTRS_o_ai vec_vperm(vector double __a,
-                                            vector double __b,
-                                            vector unsigned char __c) {
+static __inline__ vector double __ATTRS_o_ai
+vec_vperm(vector double __a, vector double __b, vector unsigned char __c) {
   return vec_perm(__a, __b, __c);
 }
 #endif
 
 /* vec_re */
 
-static vector float __ATTRS_o_ai
-vec_re(vector float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_re(vector float __a) {
 #ifdef __VSX__
   return __builtin_vsx_xvresp(__a);
 #else
@@ -6136,56 +6268,57 @@
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_re(vector double __a) {
+static __inline__ vector double __ATTRS_o_ai vec_re(vector double __a) {
   return __builtin_vsx_xvredp(__a);
 }
 #endif
 
 /* vec_vrefp */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vrefp(vector float __a) {
   return __builtin_altivec_vrefp(__a);
 }
 
 /* vec_rl */
 
-static vector signed char __ATTRS_o_ai vec_rl(vector signed char __a,
-                                              vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_rl(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_rl(vector unsigned char __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_rl(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_rl(vector short __a,
-                                        vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_rl(vector short __a,
+                                                   vector unsigned short __b) {
   return __builtin_altivec_vrlh(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_rl(vector unsigned short __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_rl(vector unsigned short __a, vector unsigned short __b) {
   return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_rl(vector int __a, vector unsigned int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_rl(vector int __a,
+                                                 vector unsigned int __b) {
   return __builtin_altivec_vrlw(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_rl(vector unsigned int __a,
-                                               vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_rl(vector unsigned int __a, vector unsigned int __b) {
   return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
 }
 
 #ifdef __POWER8_VECTOR__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_rl(vector signed long long __a, vector unsigned long long __b) {
   return __builtin_altivec_vrld(__a, __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
   return __builtin_altivec_vrld(__a, __b);
 }
@@ -6193,43 +6326,43 @@
 
 /* vec_vrlb */
 
-static vector signed char __ATTRS_o_ai vec_vrlb(vector signed char __a,
-                                                vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vrlb(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vrlb(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vrlb(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
 }
 
 /* vec_vrlh */
 
-static vector short __ATTRS_o_ai vec_vrlh(vector short __a,
-                                          vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai
+vec_vrlh(vector short __a, vector unsigned short __b) {
   return __builtin_altivec_vrlh(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vrlh(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vrlh(vector unsigned short __a, vector unsigned short __b) {
   return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
 }
 
 /* vec_vrlw */
 
-static vector int __ATTRS_o_ai vec_vrlw(vector int __a,
-                                        vector unsigned int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vrlw(vector int __a,
+                                                   vector unsigned int __b) {
   return __builtin_altivec_vrlw(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vrlw(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vrlw(vector unsigned int __a, vector unsigned int __b) {
   return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
 }
 
 /* vec_round */
 
-static vector float __ATTRS_o_ai vec_round(vector float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) {
 #ifdef __VSX__
   return __builtin_vsx_xvrspi(__a);
 #else
@@ -6238,36 +6371,34 @@
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_round(vector double __a) {
+static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
   return __builtin_vsx_xvrdpi(__a);
 }
 
 /* vec_rint */
 
-static vector float __ATTRS_o_ai
-vec_rint(vector float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_rint(vector float __a) {
   return __builtin_vsx_xvrspic(__a);
 }
 
-static vector double __ATTRS_o_ai
-vec_rint(vector double __a) {
+static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a) {
   return __builtin_vsx_xvrdpic(__a);
 }
 
 /* vec_nearbyint */
 
-static vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
   return __builtin_vsx_xvrspi(__a);
 }
 
-static vector double __ATTRS_o_ai vec_nearbyint(vector double __a) {
+static __inline__ vector double __ATTRS_o_ai vec_nearbyint(vector double __a) {
   return __builtin_vsx_xvrdpi(__a);
 }
 #endif
 
 /* vec_vrfin */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vrfin(vector float __a) {
   return __builtin_altivec_vrfin(__a);
 }
@@ -6275,19 +6406,18 @@
 /* vec_sqrt */
 
 #ifdef __VSX__
-static vector float __ATTRS_o_ai vec_sqrt(vector float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_sqrt(vector float __a) {
   return __builtin_vsx_xvsqrtsp(__a);
 }
 
-static vector double __ATTRS_o_ai vec_sqrt(vector double __a) {
+static __inline__ vector double __ATTRS_o_ai vec_sqrt(vector double __a) {
   return __builtin_vsx_xvsqrtdp(__a);
 }
 #endif
 
 /* vec_rsqrte */
 
-static vector float __ATTRS_o_ai
-vec_rsqrte(vector float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_rsqrte(vector float __a) {
 #ifdef __VSX__
   return __builtin_vsx_xvrsqrtesp(__a);
 #else
@@ -6296,14 +6426,14 @@
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
+static __inline__ vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
   return __builtin_vsx_xvrsqrtedp(__a);
 }
 #endif
 
 /* vec_vrsqrtefp */
 
-static __vector float __attribute__((__always_inline__))
+static __inline__ __vector float __attribute__((__always_inline__))
 vec_vrsqrtefp(vector float __a) {
   return __builtin_altivec_vrsqrtefp(__a);
 }
@@ -6312,257 +6442,250 @@
 
 #define __builtin_altivec_vsel_4si vec_sel
 
-static vector signed char __ATTRS_o_ai vec_sel(vector signed char __a,
-                                               vector signed char __b,
-                                               vector unsigned char __c) {
+static __inline__ vector signed char __ATTRS_o_ai vec_sel(
+    vector signed char __a, vector signed char __b, vector unsigned char __c) {
   return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
 }
 
-static vector signed char __ATTRS_o_ai vec_sel(vector signed char __a,
-                                               vector signed char __b,
-                                               vector bool char __c) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
   return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sel(vector unsigned char __a,
-                                                 vector unsigned char __b,
-                                                 vector unsigned char __c) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sel(vector unsigned char __a, vector unsigned char __b,
+        vector unsigned char __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sel(vector unsigned char __a,
-                                                 vector unsigned char __b,
-                                                 vector bool char __c) {
+static __inline__ vector unsigned char __ATTRS_o_ai vec_sel(
+    vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
   return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
 }
 
-static vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
-                                             vector bool char __b,
-                                             vector unsigned char __c) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
   return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
 }
 
-static vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
-                                             vector bool char __b,
-                                             vector bool char __c) {
+static __inline__ vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
+                                                        vector bool char __b,
+                                                        vector bool char __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector short __ATTRS_o_ai vec_sel(vector short __a, vector short __b,
-                                         vector unsigned short __c) {
+static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
+                                                    vector short __b,
+                                                    vector unsigned short __c) {
   return (__a & ~(vector short)__c) | (__b & (vector short)__c);
 }
 
-static vector short __ATTRS_o_ai vec_sel(vector short __a, vector short __b,
-                                         vector bool short __c) {
+static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
+                                                    vector short __b,
+                                                    vector bool short __c) {
   return (__a & ~(vector short)__c) | (__b & (vector short)__c);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sel(vector unsigned short __a,
-                                                  vector unsigned short __b,
-                                                  vector unsigned short __c) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+        vector unsigned short __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sel(vector unsigned short __a,
-                                                  vector unsigned short __b,
-                                                  vector bool short __c) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+        vector bool short __c) {
   return (__a & ~(vector unsigned short)__c) |
          (__b & (vector unsigned short)__c);
 }
 
-static vector bool short __ATTRS_o_ai vec_sel(vector bool short __a,
-                                              vector bool short __b,
-                                              vector unsigned short __c) {
+static __inline__ vector bool short __ATTRS_o_ai vec_sel(
+    vector bool short __a, vector bool short __b, vector unsigned short __c) {
   return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
 }
 
-static vector bool short __ATTRS_o_ai vec_sel(vector bool short __a,
-                                              vector bool short __b,
-                                              vector bool short __c) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector int __ATTRS_o_ai vec_sel(vector int __a, vector int __b,
-                                       vector unsigned int __c) {
+static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
+                                                  vector int __b,
+                                                  vector unsigned int __c) {
   return (__a & ~(vector int)__c) | (__b & (vector int)__c);
 }
 
-static vector int __ATTRS_o_ai vec_sel(vector int __a, vector int __b,
-                                       vector bool int __c) {
+static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
+                                                  vector int __b,
+                                                  vector bool int __c) {
   return (__a & ~(vector int)__c) | (__b & (vector int)__c);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sel(vector unsigned int __a,
-                                                vector unsigned int __b,
-                                                vector unsigned int __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai vec_sel(
+    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sel(vector unsigned int __a,
-                                                vector unsigned int __b,
-                                                vector bool int __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
   return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
 }
 
-static vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
-                                            vector bool int __b,
-                                            vector unsigned int __c) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
   return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
 }
 
-static vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
-                                            vector bool int __b,
-                                            vector bool int __c) {
+static __inline__ vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
+                                                       vector bool int __b,
+                                                       vector bool int __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector float __ATTRS_o_ai vec_sel(vector float __a, vector float __b,
-                                         vector unsigned int __c) {
+static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
+                                                    vector float __b,
+                                                    vector unsigned int __c) {
   vector int __res = ((vector int)__a & ~(vector int)__c) |
                      ((vector int)__b & (vector int)__c);
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_sel(vector float __a, vector float __b,
-                                         vector bool int __c) {
+static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
+                                                    vector float __b,
+                                                    vector bool int __c) {
   vector int __res = ((vector int)__a & ~(vector int)__c) |
                      ((vector int)__b & (vector int)__c);
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_sel(vector double __a, vector double __b,
-                                          vector bool long long __c) {
+static __inline__ vector double __ATTRS_o_ai
+vec_sel(vector double __a, vector double __b, vector bool long long __c) {
   vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
-                     ((vector long long)__b & (vector long long)__c);
+                           ((vector long long)__b & (vector long long)__c);
   return (vector double)__res;
 }
 
-static vector double __ATTRS_o_ai vec_sel(vector double __a, vector double __b,
-                                          vector unsigned long long __c) {
+static __inline__ vector double __ATTRS_o_ai
+vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
   vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
-                     ((vector long long)__b & (vector long long)__c);
+                           ((vector long long)__b & (vector long long)__c);
   return (vector double)__res;
 }
 #endif
 
 /* vec_vsel */
 
-static vector signed char __ATTRS_o_ai vec_vsel(vector signed char __a,
-                                                vector signed char __b,
-                                                vector unsigned char __c) {
+static __inline__ vector signed char __ATTRS_o_ai vec_vsel(
+    vector signed char __a, vector signed char __b, vector unsigned char __c) {
   return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
 }
 
-static vector signed char __ATTRS_o_ai vec_vsel(vector signed char __a,
-                                                vector signed char __b,
-                                                vector bool char __c) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsel(vector signed char __a, vector signed char __b, vector bool char __c) {
   return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsel(vector unsigned char __a,
-                                                  vector unsigned char __b,
-                                                  vector unsigned char __c) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsel(vector unsigned char __a, vector unsigned char __b,
+         vector unsigned char __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsel(vector unsigned char __a,
-                                                  vector unsigned char __b,
-                                                  vector bool char __c) {
+static __inline__ vector unsigned char __ATTRS_o_ai vec_vsel(
+    vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
   return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
 }
 
-static vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
-                                              vector bool char __b,
-                                              vector unsigned char __c) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
   return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
 }
 
-static vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
-                                              vector bool char __b,
-                                              vector bool char __c) {
+static __inline__ vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
+                                                         vector bool char __b,
+                                                         vector bool char __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector short __ATTRS_o_ai vec_vsel(vector short __a, vector short __b,
-                                          vector unsigned short __c) {
+static __inline__ vector short __ATTRS_o_ai
+vec_vsel(vector short __a, vector short __b, vector unsigned short __c) {
   return (__a & ~(vector short)__c) | (__b & (vector short)__c);
 }
 
-static vector short __ATTRS_o_ai vec_vsel(vector short __a, vector short __b,
-                                          vector bool short __c) {
+static __inline__ vector short __ATTRS_o_ai vec_vsel(vector short __a,
+                                                     vector short __b,
+                                                     vector bool short __c) {
   return (__a & ~(vector short)__c) | (__b & (vector short)__c);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsel(vector unsigned short __a,
-                                                   vector unsigned short __b,
-                                                   vector unsigned short __c) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsel(vector unsigned short __a, vector unsigned short __b,
+         vector unsigned short __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsel(vector unsigned short __a,
-                                                   vector unsigned short __b,
-                                                   vector bool short __c) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsel(vector unsigned short __a, vector unsigned short __b,
+         vector bool short __c) {
   return (__a & ~(vector unsigned short)__c) |
          (__b & (vector unsigned short)__c);
 }
 
-static vector bool short __ATTRS_o_ai vec_vsel(vector bool short __a,
-                                               vector bool short __b,
-                                               vector unsigned short __c) {
+static __inline__ vector bool short __ATTRS_o_ai vec_vsel(
+    vector bool short __a, vector bool short __b, vector unsigned short __c) {
   return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
 }
 
-static vector bool short __ATTRS_o_ai vec_vsel(vector bool short __a,
-                                               vector bool short __b,
-                                               vector bool short __c) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsel(vector bool short __a, vector bool short __b, vector bool short __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector int __ATTRS_o_ai vec_vsel(vector int __a, vector int __b,
-                                        vector unsigned int __c) {
+static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
+                                                   vector int __b,
+                                                   vector unsigned int __c) {
   return (__a & ~(vector int)__c) | (__b & (vector int)__c);
 }
 
-static vector int __ATTRS_o_ai vec_vsel(vector int __a, vector int __b,
-                                        vector bool int __c) {
+static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
+                                                   vector int __b,
+                                                   vector bool int __c) {
   return (__a & ~(vector int)__c) | (__b & (vector int)__c);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsel(vector unsigned int __a,
-                                                 vector unsigned int __b,
-                                                 vector unsigned int __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
+    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsel(vector unsigned int __a,
-                                                 vector unsigned int __b,
-                                                 vector bool int __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
+    vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
   return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
 }
 
-static vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
-                                             vector bool int __b,
-                                             vector unsigned int __c) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
   return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
 }
 
-static vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
-                                             vector bool int __b,
-                                             vector bool int __c) {
+static __inline__ vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
+                                                        vector bool int __b,
+                                                        vector bool int __c) {
   return (__a & ~__c) | (__b & __c);
 }
 
-static vector float __ATTRS_o_ai vec_vsel(vector float __a, vector float __b,
-                                          vector unsigned int __c) {
+static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
+                                                     vector float __b,
+                                                     vector unsigned int __c) {
   vector int __res = ((vector int)__a & ~(vector int)__c) |
                      ((vector int)__b & (vector int)__c);
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_vsel(vector float __a, vector float __b,
-                                          vector bool int __c) {
+static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
+                                                     vector float __b,
+                                                     vector bool int __c) {
   vector int __res = ((vector int)__a & ~(vector int)__c) |
                      ((vector int)__b & (vector int)__c);
   return (vector float)__res;
@@ -6570,42 +6693,43 @@
 
 /* vec_sl */
 
-static vector signed char __ATTRS_o_ai vec_sl(vector signed char __a,
-                                              vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sl(vector signed char __a, vector unsigned char __b) {
   return __a << (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sl(vector unsigned char __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sl(vector unsigned char __a, vector unsigned char __b) {
   return __a << __b;
 }
 
-static vector short __ATTRS_o_ai vec_sl(vector short __a,
-                                        vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_sl(vector short __a,
+                                                   vector unsigned short __b) {
   return __a << (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sl(vector unsigned short __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sl(vector unsigned short __a, vector unsigned short __b) {
   return __a << __b;
 }
 
-static vector int __ATTRS_o_ai vec_sl(vector int __a, vector unsigned int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_sl(vector int __a,
+                                                 vector unsigned int __b) {
   return __a << (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sl(vector unsigned int __a,
-                                               vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sl(vector unsigned int __a, vector unsigned int __b) {
   return __a << __b;
 }
 
 #ifdef __POWER8_VECTOR__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_sl(vector signed long long __a, vector unsigned long long __b) {
   return __a << (vector long long)__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_sl(vector unsigned long long __a, vector unsigned long long __b) {
   return __a << __b;
 }
@@ -6615,13 +6739,13 @@
 
 #define __builtin_altivec_vslb vec_vslb
 
-static vector signed char __ATTRS_o_ai vec_vslb(vector signed char __a,
-                                                vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vslb(vector signed char __a, vector unsigned char __b) {
   return vec_sl(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vslb(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vslb(vector unsigned char __a, vector unsigned char __b) {
   return vec_sl(__a, __b);
 }
 
@@ -6629,13 +6753,13 @@
 
 #define __builtin_altivec_vslh vec_vslh
 
-static vector short __ATTRS_o_ai vec_vslh(vector short __a,
-                                          vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai
+vec_vslh(vector short __a, vector unsigned short __b) {
   return vec_sl(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vslh(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vslh(vector unsigned short __a, vector unsigned short __b) {
   return vec_sl(__a, __b);
 }
 
@@ -6643,13 +6767,13 @@
 
 #define __builtin_altivec_vslw vec_vslw
 
-static vector int __ATTRS_o_ai vec_vslw(vector int __a,
-                                        vector unsigned int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vslw(vector int __a,
+                                                   vector unsigned int __b) {
   return vec_sl(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vslw(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vslw(vector unsigned int __a, vector unsigned int __b) {
   return vec_sl(__a, __b);
 }
 
@@ -6657,17 +6781,15 @@
 
 #define __builtin_altivec_vsldoi_4si vec_sld
 
-static vector signed char __ATTRS_o_ai vec_sld(vector signed char __a,
-                                               vector signed char __b,
-                                               unsigned const int __c) {
+static __inline__ vector signed char __ATTRS_o_ai vec_sld(
+    vector signed char __a, vector signed char __b, unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6677,17 +6799,16 @@
 #endif
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sld(vector unsigned char __a,
-                                                 vector unsigned char __b,
-                                                 unsigned const int __c) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sld(vector unsigned char __a, vector unsigned char __b,
+        unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6697,17 +6818,15 @@
 #endif
 }
 
-static vector bool char __ATTRS_o_ai vec_sld(vector bool char __a,
-                                             vector bool char __b,
-                                             unsigned const int __c) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_sld(vector bool char __a, vector bool char __b, unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6717,17 +6836,15 @@
 #endif
 }
 
-static vector signed short __ATTRS_o_ai vec_sld(vector signed short __a,
-                                                vector signed short __b,
-                                                unsigned const int __c) {
+static __inline__ vector signed short __ATTRS_o_ai vec_sld(
+    vector signed short __a, vector signed short __b, unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6737,17 +6854,16 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sld(vector unsigned short __a,
-                                                  vector unsigned short __b,
-                                                  unsigned const int __c) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sld(vector unsigned short __a, vector unsigned short __b,
+        unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6757,17 +6873,15 @@
 #endif
 }
 
-static vector bool short __ATTRS_o_ai vec_sld(vector bool short __a,
-                                              vector bool short __b,
-                                              unsigned const int __c) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_sld(vector bool short __a, vector bool short __b, unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6777,16 +6891,16 @@
 #endif
 }
 
-static vector pixel __ATTRS_o_ai vec_sld(vector pixel __a, vector pixel __b,
-                                         unsigned const int __c) {
+static __inline__ vector pixel __ATTRS_o_ai vec_sld(vector pixel __a,
+                                                    vector pixel __b,
+                                                    unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6796,17 +6910,15 @@
 #endif
 }
 
-static vector signed int __ATTRS_o_ai vec_sld(vector signed int __a,
-                                              vector signed int __b,
-                                              unsigned const int __c) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_sld(vector signed int __a, vector signed int __b, unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6816,17 +6928,15 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sld(vector unsigned int __a,
-                                                vector unsigned int __b,
-                                                unsigned const int __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai vec_sld(
+    vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6836,17 +6946,16 @@
 #endif
 }
 
-static vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
-                                            vector bool int __b,
-                                            unsigned const int __c) {
+static __inline__ vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
+                                                       vector bool int __b,
+                                                       unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6856,16 +6965,16 @@
 #endif
 }
 
-static vector float __ATTRS_o_ai vec_sld(vector float __a, vector float __b,
-                                         unsigned const int __c) {
+static __inline__ vector float __ATTRS_o_ai vec_sld(vector float __a,
+                                                    vector float __b,
+                                                    unsigned const int __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6877,17 +6986,15 @@
 
 /* vec_vsldoi */
 
-static vector signed char __ATTRS_o_ai vec_vsldoi(vector signed char __a,
-                                                  vector signed char __b,
-                                                  unsigned char __c) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsldoi(vector signed char __a, vector signed char __b, unsigned char __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6897,17 +7004,15 @@
 #endif
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsldoi(vector unsigned char __a,
-                                                    vector unsigned char __b,
-                                                    unsigned char __c) {
+static __inline__ vector unsigned char __ATTRS_o_ai vec_vsldoi(
+    vector unsigned char __a, vector unsigned char __b, unsigned char __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6917,16 +7022,16 @@
 #endif
 }
 
-static vector short __ATTRS_o_ai vec_vsldoi(vector short __a, vector short __b,
-                                            unsigned char __c) {
+static __inline__ vector short __ATTRS_o_ai vec_vsldoi(vector short __a,
+                                                       vector short __b,
+                                                       unsigned char __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6936,17 +7041,53 @@
 #endif
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsldoi(vector unsigned short __a,
-                                                     vector unsigned short __b,
+static __inline__ vector unsigned short __ATTRS_o_ai vec_vsldoi(
+    vector unsigned short __a, vector unsigned short __b, unsigned char __c) {
+  unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+  return vec_perm(
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+  return vec_perm(
+      __a, __b,
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a,
+                                                       vector pixel __b,
+                                                       unsigned char __c) {
+  unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+  return vec_perm(
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+  return vec_perm(
+      __a, __b,
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsldoi(vector int __a,
+                                                     vector int __b,
                                                      unsigned char __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6956,16 +7097,15 @@
 #endif
 }
 
-static vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a, vector pixel __b,
-                                            unsigned char __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai vec_vsldoi(
+    vector unsigned int __a, vector unsigned int __b, unsigned char __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -6975,55 +7115,16 @@
 #endif
 }
 
-static vector int __ATTRS_o_ai vec_vsldoi(vector int __a, vector int __b,
-                                          unsigned char __c) {
+static __inline__ vector float __ATTRS_o_ai vec_vsldoi(vector float __a,
+                                                       vector float __b,
+                                                       unsigned char __c) {
   unsigned char __d = __c & 0x0F;
 #ifdef __LITTLE_ENDIAN__
   return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static vector unsigned int __ATTRS_o_ai vec_vsldoi(vector unsigned int __a,
-                                                   vector unsigned int __b,
-                                                   unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static vector float __ATTRS_o_ai vec_vsldoi(vector float __a, vector float __b,
-                                            unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a,
-      (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
-                             21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
-                             26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
-                             31 - __d));
+      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
+                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
+                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
 #else
   return vec_perm(
       __a, __b,
@@ -7035,654 +7136,655 @@
 
 /* vec_sll */
 
-static vector signed char __ATTRS_o_ai vec_sll(vector signed char __a,
-                                               vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sll(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vsl((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_sll(vector signed char __a,
-                                               vector unsigned short __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sll(vector signed char __a, vector unsigned short __b) {
   return (vector signed char)__builtin_altivec_vsl((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_sll(vector signed char __a,
-                                               vector unsigned int __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sll(vector signed char __a, vector unsigned int __b) {
   return (vector signed char)__builtin_altivec_vsl((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sll(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sll(vector unsigned char __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char __a, vector unsigned short __b) {
   return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sll(vector unsigned char __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char __a, vector unsigned int __b) {
   return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_sll(vector bool char __a,
-                                             vector unsigned char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_sll(vector bool char __a, vector unsigned char __b) {
   return (vector bool char)__builtin_altivec_vsl((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_sll(vector bool char __a,
-                                             vector unsigned short __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_sll(vector bool char __a, vector unsigned short __b) {
   return (vector bool char)__builtin_altivec_vsl((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_sll(vector bool char __a,
-                                             vector unsigned int __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_sll(vector bool char __a, vector unsigned int __b) {
   return (vector bool char)__builtin_altivec_vsl((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                         vector unsigned char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
+                                                    vector unsigned char __b) {
   return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                         vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
+                                                    vector unsigned short __b) {
   return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                         vector unsigned int __b) {
+static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
+                                                    vector unsigned int __b) {
   return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sll(vector unsigned short __a,
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short __a, vector unsigned char __b) {
+  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short __a, vector unsigned short __b) {
+  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short __a, vector unsigned int __b) {
+  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_sll(vector bool short __a, vector unsigned char __b) {
+  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_sll(vector bool short __a, vector unsigned short __b) {
+  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_sll(vector bool short __a, vector unsigned int __b) {
+  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+                                                    vector unsigned char __b) {
+  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+                                                    vector unsigned short __b) {
+  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+                                                    vector unsigned int __b) {
+  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
                                                   vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
+  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sll(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
                                                   vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
+  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sll(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
                                                   vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_sll(vector bool short __a,
-                                              vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_sll(vector bool short __a,
-                                              vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_sll(vector bool short __a,
-                                              vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                         vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                         vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                         vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                       vector unsigned char __b) {
   return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
 }
 
-static vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                       vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                       vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_sll(vector unsigned int __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int __a, vector unsigned char __b) {
   return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sll(vector unsigned int __a,
-                                                vector unsigned short __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int __a, vector unsigned short __b) {
   return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sll(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int __a, vector unsigned int __b) {
   return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_sll(vector bool int __a,
-                                            vector unsigned char __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_sll(vector bool int __a, vector unsigned char __b) {
   return (vector bool int)__builtin_altivec_vsl((vector int)__a,
                                                 (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_sll(vector bool int __a,
-                                            vector unsigned short __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_sll(vector bool int __a, vector unsigned short __b) {
   return (vector bool int)__builtin_altivec_vsl((vector int)__a,
                                                 (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_sll(vector bool int __a,
-                                            vector unsigned int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_sll(vector bool int __a, vector unsigned int __b) {
   return (vector bool int)__builtin_altivec_vsl((vector int)__a,
                                                 (vector int)__b);
 }
 
 /* vec_vsl */
 
-static vector signed char __ATTRS_o_ai vec_vsl(vector signed char __a,
-                                               vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vsl((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vsl(vector signed char __a,
-                                               vector unsigned short __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char __a, vector unsigned short __b) {
   return (vector signed char)__builtin_altivec_vsl((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vsl(vector signed char __a,
-                                               vector unsigned int __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char __a, vector unsigned int __b) {
   return (vector signed char)__builtin_altivec_vsl((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsl(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsl(vector unsigned char __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char __a, vector unsigned short __b) {
   return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsl(vector unsigned char __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char __a, vector unsigned int __b) {
   return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_vsl(vector bool char __a,
-                                             vector unsigned char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsl(vector bool char __a, vector unsigned char __b) {
   return (vector bool char)__builtin_altivec_vsl((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_vsl(vector bool char __a,
-                                             vector unsigned short __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsl(vector bool char __a, vector unsigned short __b) {
   return (vector bool char)__builtin_altivec_vsl((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_vsl(vector bool char __a,
-                                             vector unsigned int __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsl(vector bool char __a, vector unsigned int __b) {
   return (vector bool char)__builtin_altivec_vsl((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                         vector unsigned char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
+                                                    vector unsigned char __b) {
   return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                         vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
+                                                    vector unsigned short __b) {
   return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                         vector unsigned int __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
+                                                    vector unsigned int __b) {
   return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsl(vector unsigned short __a,
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short __a, vector unsigned char __b) {
+  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short __a, vector unsigned short __b) {
+  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short __a, vector unsigned int __b) {
+  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsl(vector bool short __a, vector unsigned char __b) {
+  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsl(vector bool short __a, vector unsigned short __b) {
+  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsl(vector bool short __a, vector unsigned int __b) {
+  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+                                                    vector unsigned char __b) {
+  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+                                                    vector unsigned short __b) {
+  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+                                                    vector unsigned int __b) {
+  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
                                                   vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
+  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsl(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
                                                   vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
+  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsl(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
                                                   vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_vsl(vector bool short __a,
-                                              vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_vsl(vector bool short __a,
-                                              vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_vsl(vector bool short __a,
-                                              vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                         vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                         vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                         vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                       vector unsigned char __b) {
   return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
 }
 
-static vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                       vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                       vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_vsl(vector unsigned int __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int __a, vector unsigned char __b) {
   return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsl(vector unsigned int __a,
-                                                vector unsigned short __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int __a, vector unsigned short __b) {
   return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsl(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int __a, vector unsigned int __b) {
   return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_vsl(vector bool int __a,
-                                            vector unsigned char __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsl(vector bool int __a, vector unsigned char __b) {
   return (vector bool int)__builtin_altivec_vsl((vector int)__a,
                                                 (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_vsl(vector bool int __a,
-                                            vector unsigned short __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsl(vector bool int __a, vector unsigned short __b) {
   return (vector bool int)__builtin_altivec_vsl((vector int)__a,
                                                 (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_vsl(vector bool int __a,
-                                            vector unsigned int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsl(vector bool int __a, vector unsigned int __b) {
   return (vector bool int)__builtin_altivec_vsl((vector int)__a,
                                                 (vector int)__b);
 }
 
 /* vec_slo */
 
-static vector signed char __ATTRS_o_ai vec_slo(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_slo(vector signed char __a, vector signed char __b) {
   return (vector signed char)__builtin_altivec_vslo((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_slo(vector signed char __a,
-                                               vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_slo(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vslo((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_slo(vector unsigned char __a,
-                                                 vector signed char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_slo(vector unsigned char __a, vector signed char __b) {
   return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
                                                       (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_slo(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_slo(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
                                                       (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_slo(vector short __a,
-                                         vector signed char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
+                                                    vector signed char __b) {
   return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_slo(vector short __a,
-                                         vector unsigned char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
+                                                    vector unsigned char __b) {
   return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_slo(vector unsigned short __a,
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_slo(vector unsigned short __a, vector signed char __b) {
+  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+                                                       (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_slo(vector unsigned short __a, vector unsigned char __b) {
+  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+                                                       (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
+                                                    vector signed char __b) {
+  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
+                                                    vector unsigned char __b) {
+  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
                                                   vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
+  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_slo(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
                                                   vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
-                                         vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
-                                         vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_slo(vector int __a, vector signed char __b) {
   return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
 }
 
-static vector int __ATTRS_o_ai vec_slo(vector int __a,
-                                       vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_slo(vector unsigned int __a,
-                                                vector signed char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_slo(vector unsigned int __a, vector signed char __b) {
   return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_slo(vector unsigned int __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_slo(vector unsigned int __a, vector unsigned char __b) {
   return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector float __ATTRS_o_ai vec_slo(vector float __a,
-                                         vector signed char __b) {
+static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
+                                                    vector signed char __b) {
   return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
 }
 
-static vector float __ATTRS_o_ai vec_slo(vector float __a,
-                                         vector unsigned char __b) {
+static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
+                                                    vector unsigned char __b) {
   return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
 }
 
 /* vec_vslo */
 
-static vector signed char __ATTRS_o_ai vec_vslo(vector signed char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vslo(vector signed char __a, vector signed char __b) {
   return (vector signed char)__builtin_altivec_vslo((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vslo(vector signed char __a,
-                                                vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vslo(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vslo((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vslo(vector unsigned char __a,
-                                                  vector signed char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vslo(vector unsigned char __a, vector signed char __b) {
   return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
                                                       (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vslo(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vslo(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
                                                       (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_vslo(vector short __a,
-                                          vector signed char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
+                                                     vector signed char __b) {
   return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_vslo(vector short __a,
-                                          vector unsigned char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
+                                                     vector unsigned char __b) {
   return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vslo(vector unsigned short __a,
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vslo(vector unsigned short __a, vector signed char __b) {
+  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+                                                       (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vslo(vector unsigned short __a, vector unsigned char __b) {
+  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+                                                       (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
+                                                     vector signed char __b) {
+  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
+                                                     vector unsigned char __b) {
+  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
                                                    vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
+  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vslo(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
                                                    vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
-                                          vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
-                                          vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_vslo(vector int __a,
-                                        vector signed char __b) {
   return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
 }
 
-static vector int __ATTRS_o_ai vec_vslo(vector int __a,
-                                        vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_vslo(vector unsigned int __a,
-                                                 vector signed char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vslo(vector unsigned int __a, vector signed char __b) {
   return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vslo(vector unsigned int __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vslo(vector unsigned int __a, vector unsigned char __b) {
   return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector float __ATTRS_o_ai vec_vslo(vector float __a,
-                                          vector signed char __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
+                                                     vector signed char __b) {
   return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
 }
 
-static vector float __ATTRS_o_ai vec_vslo(vector float __a,
-                                          vector unsigned char __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
+                                                     vector unsigned char __b) {
   return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
 }
 
 /* vec_splat */
 
-static vector signed char __ATTRS_o_ai vec_splat(vector signed char __a,
-                                                 unsigned const int __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_splat(vector signed char __a, unsigned const int __b) {
   return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
 }
 
-static vector unsigned char __ATTRS_o_ai vec_splat(vector unsigned char __a,
-                                                   unsigned const int __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_splat(vector unsigned char __a, unsigned const int __b) {
   return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
 }
 
-static vector bool char __ATTRS_o_ai vec_splat(vector bool char __a,
-                                               unsigned const int __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_splat(vector bool char __a, unsigned const int __b) {
   return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
 }
 
-static vector signed short __ATTRS_o_ai vec_splat(vector signed short __a,
-                                                  unsigned const int __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_splat(vector signed short __a, unsigned const int __b) {
   unsigned char b0 = (__b & 0x07) * 2;
   unsigned char b1 = b0 + 1;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1, b0, b1));
+                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
+                                         b0, b1, b0, b1, b0, b1));
 }
 
-static vector unsigned short __ATTRS_o_ai vec_splat(vector unsigned short __a,
-                                                    unsigned const int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_splat(vector unsigned short __a, unsigned const int __b) {
   unsigned char b0 = (__b & 0x07) * 2;
   unsigned char b1 = b0 + 1;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1, b0, b1));
+                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
+                                         b0, b1, b0, b1, b0, b1));
 }
 
-static vector bool short __ATTRS_o_ai vec_splat(vector bool short __a,
-                                                unsigned const int __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_splat(vector bool short __a, unsigned const int __b) {
   unsigned char b0 = (__b & 0x07) * 2;
   unsigned char b1 = b0 + 1;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1, b0, b1));
+                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
+                                         b0, b1, b0, b1, b0, b1));
 }
 
-static vector pixel __ATTRS_o_ai vec_splat(vector pixel __a,
-                                           unsigned const int __b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_splat(vector pixel __a,
+                                                      unsigned const int __b) {
   unsigned char b0 = (__b & 0x07) * 2;
   unsigned char b1 = b0 + 1;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1, b0, b1));
+                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
+                                         b0, b1, b0, b1, b0, b1));
 }
 
-static vector signed int __ATTRS_o_ai vec_splat(vector signed int __a,
-                                                unsigned const int __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_splat(vector signed int __a, unsigned const int __b) {
   unsigned char b0 = (__b & 0x03) * 4;
   unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
-                                         b1, b2, b3, b0, b1, b2, b3));
+                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
+                                         b2, b3, b0, b1, b2, b3));
 }
 
-static vector unsigned int __ATTRS_o_ai vec_splat(vector unsigned int __a,
-                                                  unsigned const int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_splat(vector unsigned int __a, unsigned const int __b) {
   unsigned char b0 = (__b & 0x03) * 4;
   unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
-                                         b1, b2, b3, b0, b1, b2, b3));
+                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
+                                         b2, b3, b0, b1, b2, b3));
 }
 
-static vector bool int __ATTRS_o_ai vec_splat(vector bool int __a,
-                                              unsigned const int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_splat(vector bool int __a, unsigned const int __b) {
   unsigned char b0 = (__b & 0x03) * 4;
   unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
-                                         b1, b2, b3, b0, b1, b2, b3));
+                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
+                                         b2, b3, b0, b1, b2, b3));
 }
 
-static vector float __ATTRS_o_ai vec_splat(vector float __a,
-                                           unsigned const int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_splat(vector float __a,
+                                                      unsigned const int __b) {
   unsigned char b0 = (__b & 0x03) * 4;
   unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
-                                         b1, b2, b3, b0, b1, b2, b3));
+                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
+                                         b2, b3, b0, b1, b2, b3));
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_splat(vector double __a,
-                                            unsigned const int __b) {
+static __inline__ vector double __ATTRS_o_ai vec_splat(vector double __a,
+                                                       unsigned const int __b) {
   unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
-                b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
+                b6 = b0 + 6, b7 = b0 + 7;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
-                                         b0, b1, b2, b3, b4, b5, b6, b7));
+                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
+                                         b2, b3, b4, b5, b6, b7));
 }
-static vector bool long long __ATTRS_o_ai vec_splat(vector bool long long __a,
-                                                    unsigned const int __b) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_splat(vector bool long long __a, unsigned const int __b) {
   unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
-                b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
+                b6 = b0 + 6, b7 = b0 + 7;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
-                                         b0, b1, b2, b3, b4, b5, b6, b7));
+                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
+                                         b2, b3, b4, b5, b6, b7));
 }
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_splat(vector signed long long __a, unsigned const int __b) {
   unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
-                b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
+                b6 = b0 + 6, b7 = b0 + 7;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
-                                         b0, b1, b2, b3, b4, b5, b6, b7));
+                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
+                                         b2, b3, b4, b5, b6, b7));
 }
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_splat(vector unsigned long long __a, unsigned const int __b) {
   unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
-                b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
+                b6 = b0 + 6, b7 = b0 + 7;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
-                                         b0, b1, b2, b3, b4, b5, b6, b7));
+                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
+                                         b2, b3, b4, b5, b6, b7));
 }
 #endif
 
@@ -7690,18 +7792,18 @@
 
 #define __builtin_altivec_vspltb vec_vspltb
 
-static vector signed char __ATTRS_o_ai vec_vspltb(vector signed char __a,
-                                                  unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vspltb(vector signed char __a, unsigned char __b) {
   return vec_perm(__a, __a, (vector unsigned char)(__b));
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vspltb(vector unsigned char __a,
-                                                    unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vspltb(vector unsigned char __a, unsigned char __b) {
   return vec_perm(__a, __a, (vector unsigned char)(__b));
 }
 
-static vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a,
-                                                unsigned char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a,
+                                                           unsigned char __b) {
   return vec_perm(__a, __a, (vector unsigned char)(__b));
 }
 
@@ -7709,8 +7811,8 @@
 
 #define __builtin_altivec_vsplth vec_vsplth
 
-static vector short __ATTRS_o_ai vec_vsplth(vector short __a,
-                                            unsigned char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsplth(vector short __a,
+                                                       unsigned char __b) {
   __b *= 2;
   unsigned char b1 = __b + 1;
   return vec_perm(__a, __a,
@@ -7718,8 +7820,8 @@
                                          __b, b1, __b, b1, __b, b1, __b, b1));
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsplth(vector unsigned short __a,
-                                                     unsigned char __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsplth(vector unsigned short __a, unsigned char __b) {
   __b *= 2;
   unsigned char b1 = __b + 1;
   return vec_perm(__a, __a,
@@ -7727,8 +7829,8 @@
                                          __b, b1, __b, b1, __b, b1, __b, b1));
 }
 
-static vector bool short __ATTRS_o_ai vec_vsplth(vector bool short __a,
-                                                 unsigned char __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsplth(vector bool short __a, unsigned char __b) {
   __b *= 2;
   unsigned char b1 = __b + 1;
   return vec_perm(__a, __a,
@@ -7736,8 +7838,8 @@
                                          __b, b1, __b, b1, __b, b1, __b, b1));
 }
 
-static vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a,
-                                            unsigned char __b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a,
+                                                       unsigned char __b) {
   __b *= 2;
   unsigned char b1 = __b + 1;
   return vec_perm(__a, __a,
@@ -7749,7 +7851,8 @@
 
 #define __builtin_altivec_vspltw vec_vspltw
 
-static vector int __ATTRS_o_ai vec_vspltw(vector int __a, unsigned char __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vspltw(vector int __a,
+                                                     unsigned char __b) {
   __b *= 4;
   unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
   return vec_perm(__a, __a,
@@ -7757,8 +7860,8 @@
                                          b1, b2, b3, __b, b1, b2, b3));
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vspltw(vector unsigned int __a,
-                                                   unsigned char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vspltw(vector unsigned int __a, unsigned char __b) {
   __b *= 4;
   unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
   return vec_perm(__a, __a,
@@ -7766,8 +7869,8 @@
                                          b1, b2, b3, __b, b1, b2, b3));
 }
 
-static vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a,
-                                               unsigned char __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a,
+                                                          unsigned char __b) {
   __b *= 4;
   unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
   return vec_perm(__a, __a,
@@ -7775,8 +7878,8 @@
                                          b1, b2, b3, __b, b1, b2, b3));
 }
 
-static vector float __ATTRS_o_ai vec_vspltw(vector float __a,
-                                            unsigned char __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vspltw(vector float __a,
+                                                       unsigned char __b) {
   __b *= 4;
   unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
   return vec_perm(__a, __a,
@@ -7789,14 +7892,16 @@
 #define __builtin_altivec_vspltisb vec_splat_s8
 
 // FIXME: parameter should be treated as 5-bit signed literal
-static vector signed char __ATTRS_o_ai vec_splat_s8(signed char __a) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_splat_s8(signed char __a) {
   return (vector signed char)(__a);
 }
 
 /* vec_vspltisb */
 
 // FIXME: parameter should be treated as 5-bit signed literal
-static vector signed char __ATTRS_o_ai vec_vspltisb(signed char __a) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vspltisb(signed char __a) {
   return (vector signed char)(__a);
 }
 
@@ -7805,14 +7910,14 @@
 #define __builtin_altivec_vspltish vec_splat_s16
 
 // FIXME: parameter should be treated as 5-bit signed literal
-static vector short __ATTRS_o_ai vec_splat_s16(signed char __a) {
+static __inline__ vector short __ATTRS_o_ai vec_splat_s16(signed char __a) {
   return (vector short)(__a);
 }
 
 /* vec_vspltish */
 
 // FIXME: parameter should be treated as 5-bit signed literal
-static vector short __ATTRS_o_ai vec_vspltish(signed char __a) {
+static __inline__ vector short __ATTRS_o_ai vec_vspltish(signed char __a) {
   return (vector short)(__a);
 }
 
@@ -7821,81 +7926,84 @@
 #define __builtin_altivec_vspltisw vec_splat_s32
 
 // FIXME: parameter should be treated as 5-bit signed literal
-static vector int __ATTRS_o_ai vec_splat_s32(signed char __a) {
+static __inline__ vector int __ATTRS_o_ai vec_splat_s32(signed char __a) {
   return (vector int)(__a);
 }
 
 /* vec_vspltisw */
 
 // FIXME: parameter should be treated as 5-bit signed literal
-static vector int __ATTRS_o_ai vec_vspltisw(signed char __a) {
+static __inline__ vector int __ATTRS_o_ai vec_vspltisw(signed char __a) {
   return (vector int)(__a);
 }
 
 /* vec_splat_u8 */
 
 // FIXME: parameter should be treated as 5-bit signed literal
-static vector unsigned char __ATTRS_o_ai vec_splat_u8(unsigned char __a) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_splat_u8(unsigned char __a) {
   return (vector unsigned char)(__a);
 }
 
 /* vec_splat_u16 */
 
 // FIXME: parameter should be treated as 5-bit signed literal
-static vector unsigned short __ATTRS_o_ai vec_splat_u16(signed char __a) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_splat_u16(signed char __a) {
   return (vector unsigned short)(__a);
 }
 
 /* vec_splat_u32 */
 
 // FIXME: parameter should be treated as 5-bit signed literal
-static vector unsigned int __ATTRS_o_ai vec_splat_u32(signed char __a) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_splat_u32(signed char __a) {
   return (vector unsigned int)(__a);
 }
 
 /* vec_sr */
 
-static vector signed char __ATTRS_o_ai vec_sr(vector signed char __a,
-                                              vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sr(vector signed char __a, vector unsigned char __b) {
   vector unsigned char __res = (vector unsigned char)__a >> __b;
   return (vector signed char)__res;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sr(vector unsigned char __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sr(vector unsigned char __a, vector unsigned char __b) {
   return __a >> __b;
 }
 
-static vector signed short __ATTRS_o_ai vec_sr(vector signed short __a,
-                                        vector unsigned short __b) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_sr(vector signed short __a, vector unsigned short __b) {
   vector unsigned short __res = (vector unsigned short)__a >> __b;
   return (vector signed short)__res;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sr(vector unsigned short __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sr(vector unsigned short __a, vector unsigned short __b) {
   return __a >> __b;
 }
 
-static vector signed int __ATTRS_o_ai vec_sr(vector signed int __a,
-                                             vector unsigned int __b) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_sr(vector signed int __a, vector unsigned int __b) {
   vector unsigned int __res = (vector unsigned int)__a >> __b;
   return (vector signed int)__res;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sr(vector unsigned int __a,
-                                               vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sr(vector unsigned int __a, vector unsigned int __b) {
   return __a >> __b;
 }
 
 #ifdef __POWER8_VECTOR__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_sr(vector signed long long __a, vector unsigned long long __b) {
   vector unsigned long long __res = (vector unsigned long long)__a >> __b;
   return (vector signed long long)__res;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
   return __a >> __b;
 }
@@ -7905,13 +8013,13 @@
 
 #define __builtin_altivec_vsrb vec_vsrb
 
-static vector signed char __ATTRS_o_ai vec_vsrb(vector signed char __a,
-                                                vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsrb(vector signed char __a, vector unsigned char __b) {
   return __a >> (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsrb(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
   return __a >> __b;
 }
 
@@ -7919,13 +8027,13 @@
 
 #define __builtin_altivec_vsrh vec_vsrh
 
-static vector short __ATTRS_o_ai vec_vsrh(vector short __a,
-                                          vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai
+vec_vsrh(vector short __a, vector unsigned short __b) {
   return __a >> (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsrh(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
   return __a >> __b;
 }
 
@@ -7933,55 +8041,55 @@
 
 #define __builtin_altivec_vsrw vec_vsrw
 
-static vector int __ATTRS_o_ai vec_vsrw(vector int __a,
-                                        vector unsigned int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a,
+                                                   vector unsigned int __b) {
   return __a >> (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsrw(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsrw(vector unsigned int __a, vector unsigned int __b) {
   return __a >> __b;
 }
 
 /* vec_sra */
 
-static vector signed char __ATTRS_o_ai vec_sra(vector signed char __a,
-                                               vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sra(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sra(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sra(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_sra(vector short __a,
-                                         vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_sra(vector short __a,
+                                                    vector unsigned short __b) {
   return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sra(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sra(vector unsigned short __a, vector unsigned short __b) {
   return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_sra(vector int __a,
-                                       vector unsigned int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_sra(vector int __a,
+                                                  vector unsigned int __b) {
   return __builtin_altivec_vsraw(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sra(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sra(vector unsigned int __a, vector unsigned int __b) {
   return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
 }
 
 #ifdef __POWER8_VECTOR__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_sra(vector signed long long __a, vector unsigned long long __b) {
   return __a >> __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
   return (vector unsigned long long)((vector signed long long)__a >> __b);
 }
@@ -7989,1324 +8097,1373 @@
 
 /* vec_vsrab */
 
-static vector signed char __ATTRS_o_ai vec_vsrab(vector signed char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsrab(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsrab(vector unsigned char __a,
-                                                   vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsrab(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
 }
 
 /* vec_vsrah */
 
-static vector short __ATTRS_o_ai vec_vsrah(vector short __a,
-                                           vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai
+vec_vsrah(vector short __a, vector unsigned short __b) {
   return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsrah(vector unsigned short __a,
-                                                    vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsrah(vector unsigned short __a, vector unsigned short __b) {
   return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
 }
 
 /* vec_vsraw */
 
-static vector int __ATTRS_o_ai vec_vsraw(vector int __a,
-                                         vector unsigned int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vsraw(vector int __a,
+                                                    vector unsigned int __b) {
   return __builtin_altivec_vsraw(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsraw(vector unsigned int __a,
-                                                  vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsraw(vector unsigned int __a, vector unsigned int __b) {
   return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
 }
 
 /* vec_srl */
 
-static vector signed char __ATTRS_o_ai vec_srl(vector signed char __a,
-                                               vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_srl(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vsr((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_srl(vector signed char __a,
-                                               vector unsigned short __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_srl(vector signed char __a, vector unsigned short __b) {
   return (vector signed char)__builtin_altivec_vsr((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_srl(vector signed char __a,
-                                               vector unsigned int __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_srl(vector signed char __a, vector unsigned int __b) {
   return (vector signed char)__builtin_altivec_vsr((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_srl(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_srl(vector unsigned char __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char __a, vector unsigned short __b) {
   return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_srl(vector unsigned char __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char __a, vector unsigned int __b) {
   return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_srl(vector bool char __a,
-                                             vector unsigned char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_srl(vector bool char __a, vector unsigned char __b) {
   return (vector bool char)__builtin_altivec_vsr((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_srl(vector bool char __a,
-                                             vector unsigned short __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_srl(vector bool char __a, vector unsigned short __b) {
   return (vector bool char)__builtin_altivec_vsr((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_srl(vector bool char __a,
-                                             vector unsigned int __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_srl(vector bool char __a, vector unsigned int __b) {
   return (vector bool char)__builtin_altivec_vsr((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                         vector unsigned char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
+                                                    vector unsigned char __b) {
   return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                         vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
+                                                    vector unsigned short __b) {
   return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                         vector unsigned int __b) {
+static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
+                                                    vector unsigned int __b) {
   return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_srl(vector unsigned short __a,
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short __a, vector unsigned char __b) {
+  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short __a, vector unsigned short __b) {
+  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short __a, vector unsigned int __b) {
+  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_srl(vector bool short __a, vector unsigned char __b) {
+  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_srl(vector bool short __a, vector unsigned short __b) {
+  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_srl(vector bool short __a, vector unsigned int __b) {
+  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+                                                    vector unsigned char __b) {
+  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+                                                    vector unsigned short __b) {
+  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+                                                    vector unsigned int __b) {
+  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
                                                   vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
+  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_srl(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
                                                   vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
+  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_srl(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
                                                   vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_srl(vector bool short __a,
-                                              vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_srl(vector bool short __a,
-                                              vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_srl(vector bool short __a,
-                                              vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                         vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                         vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                         vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                       vector unsigned char __b) {
   return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
 }
 
-static vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                       vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                       vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_srl(vector unsigned int __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int __a, vector unsigned char __b) {
   return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_srl(vector unsigned int __a,
-                                                vector unsigned short __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int __a, vector unsigned short __b) {
   return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_srl(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int __a, vector unsigned int __b) {
   return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_srl(vector bool int __a,
-                                            vector unsigned char __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_srl(vector bool int __a, vector unsigned char __b) {
   return (vector bool int)__builtin_altivec_vsr((vector int)__a,
                                                 (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_srl(vector bool int __a,
-                                            vector unsigned short __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_srl(vector bool int __a, vector unsigned short __b) {
   return (vector bool int)__builtin_altivec_vsr((vector int)__a,
                                                 (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_srl(vector bool int __a,
-                                            vector unsigned int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_srl(vector bool int __a, vector unsigned int __b) {
   return (vector bool int)__builtin_altivec_vsr((vector int)__a,
                                                 (vector int)__b);
 }
 
 /* vec_vsr */
 
-static vector signed char __ATTRS_o_ai vec_vsr(vector signed char __a,
-                                               vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vsr((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vsr(vector signed char __a,
-                                               vector unsigned short __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char __a, vector unsigned short __b) {
   return (vector signed char)__builtin_altivec_vsr((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vsr(vector signed char __a,
-                                               vector unsigned int __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char __a, vector unsigned int __b) {
   return (vector signed char)__builtin_altivec_vsr((vector int)__a,
                                                    (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsr(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsr(vector unsigned char __a,
-                                                 vector unsigned short __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char __a, vector unsigned short __b) {
   return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsr(vector unsigned char __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char __a, vector unsigned int __b) {
   return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_vsr(vector bool char __a,
-                                             vector unsigned char __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsr(vector bool char __a, vector unsigned char __b) {
   return (vector bool char)__builtin_altivec_vsr((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_vsr(vector bool char __a,
-                                             vector unsigned short __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsr(vector bool char __a, vector unsigned short __b) {
   return (vector bool char)__builtin_altivec_vsr((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector bool char __ATTRS_o_ai vec_vsr(vector bool char __a,
-                                             vector unsigned int __b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsr(vector bool char __a, vector unsigned int __b) {
   return (vector bool char)__builtin_altivec_vsr((vector int)__a,
                                                  (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                         vector unsigned char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
+                                                    vector unsigned char __b) {
   return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                         vector unsigned short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
+                                                    vector unsigned short __b) {
   return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                         vector unsigned int __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
+                                                    vector unsigned int __b) {
   return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsr(vector unsigned short __a,
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short __a, vector unsigned char __b) {
+  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short __a, vector unsigned short __b) {
+  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short __a, vector unsigned int __b) {
+  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+                                                      (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsr(vector bool short __a, vector unsigned char __b) {
+  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsr(vector bool short __a, vector unsigned short __b) {
+  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsr(vector bool short __a, vector unsigned int __b) {
+  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+                                                  (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+                                                    vector unsigned char __b) {
+  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+                                                    vector unsigned short __b) {
+  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+                                                    vector unsigned int __b) {
+  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
                                                   vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
+  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsr(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
                                                   vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
+  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsr(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
                                                   vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_vsr(vector bool short __a,
-                                              vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_vsr(vector bool short __a,
-                                              vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector bool short __ATTRS_o_ai vec_vsr(vector bool short __a,
-                                              vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                         vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                         vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                         vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                       vector unsigned char __b) {
   return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
 }
 
-static vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                       vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                       vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_vsr(vector unsigned int __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int __a, vector unsigned char __b) {
   return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsr(vector unsigned int __a,
-                                                vector unsigned short __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int __a, vector unsigned short __b) {
   return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsr(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int __a, vector unsigned int __b) {
   return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_vsr(vector bool int __a,
-                                            vector unsigned char __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsr(vector bool int __a, vector unsigned char __b) {
   return (vector bool int)__builtin_altivec_vsr((vector int)__a,
                                                 (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_vsr(vector bool int __a,
-                                            vector unsigned short __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsr(vector bool int __a, vector unsigned short __b) {
   return (vector bool int)__builtin_altivec_vsr((vector int)__a,
                                                 (vector int)__b);
 }
 
-static vector bool int __ATTRS_o_ai vec_vsr(vector bool int __a,
-                                            vector unsigned int __b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsr(vector bool int __a, vector unsigned int __b) {
   return (vector bool int)__builtin_altivec_vsr((vector int)__a,
                                                 (vector int)__b);
 }
 
 /* vec_sro */
 
-static vector signed char __ATTRS_o_ai vec_sro(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sro(vector signed char __a, vector signed char __b) {
   return (vector signed char)__builtin_altivec_vsro((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_sro(vector signed char __a,
-                                               vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sro(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vsro((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sro(vector unsigned char __a,
-                                                 vector signed char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sro(vector unsigned char __a, vector signed char __b) {
   return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
                                                       (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sro(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sro(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
                                                       (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_sro(vector short __a,
-                                         vector signed char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
+                                                    vector signed char __b) {
   return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_sro(vector short __a,
-                                         vector unsigned char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
+                                                    vector unsigned char __b) {
   return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sro(vector unsigned short __a,
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sro(vector unsigned short __a, vector signed char __b) {
+  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+                                                       (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sro(vector unsigned short __a, vector unsigned char __b) {
+  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+                                                       (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
+                                                    vector signed char __b) {
+  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
+                                                    vector unsigned char __b) {
+  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
                                                   vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
+  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sro(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
                                                   vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
-                                         vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
-                                         vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_sro(vector int __a, vector signed char __b) {
   return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
 }
 
-static vector int __ATTRS_o_ai vec_sro(vector int __a,
-                                       vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_sro(vector unsigned int __a,
-                                                vector signed char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sro(vector unsigned int __a, vector signed char __b) {
   return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sro(vector unsigned int __a,
-                                                vector unsigned char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sro(vector unsigned int __a, vector unsigned char __b) {
   return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector float __ATTRS_o_ai vec_sro(vector float __a,
-                                         vector signed char __b) {
+static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
+                                                    vector signed char __b) {
   return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
 }
 
-static vector float __ATTRS_o_ai vec_sro(vector float __a,
-                                         vector unsigned char __b) {
+static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
+                                                    vector unsigned char __b) {
   return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
 }
 
 /* vec_vsro */
 
-static vector signed char __ATTRS_o_ai vec_vsro(vector signed char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsro(vector signed char __a, vector signed char __b) {
   return (vector signed char)__builtin_altivec_vsro((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vsro(vector signed char __a,
-                                                vector unsigned char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsro(vector signed char __a, vector unsigned char __b) {
   return (vector signed char)__builtin_altivec_vsro((vector int)__a,
                                                     (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsro(vector unsigned char __a,
-                                                  vector signed char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsro(vector unsigned char __a, vector signed char __b) {
   return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
                                                       (vector int)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsro(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsro(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
                                                       (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_vsro(vector short __a,
-                                          vector signed char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
+                                                     vector signed char __b) {
   return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
 }
 
-static vector short __ATTRS_o_ai vec_vsro(vector short __a,
-                                          vector unsigned char __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
+                                                     vector unsigned char __b) {
   return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsro(vector unsigned short __a,
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsro(vector unsigned short __a, vector signed char __b) {
+  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+                                                       (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsro(vector unsigned short __a, vector unsigned char __b) {
+  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+                                                       (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
+                                                     vector signed char __b) {
+  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
+                                                     vector unsigned char __b) {
+  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
                                                    vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
+  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsro(vector unsigned short __a,
+static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
                                                    vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
-                                          vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
-                                          vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static vector int __ATTRS_o_ai vec_vsro(vector int __a,
-                                        vector signed char __b) {
   return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
 }
 
-static vector int __ATTRS_o_ai vec_vsro(vector int __a,
-                                        vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_vsro(vector unsigned int __a,
-                                                 vector signed char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsro(vector unsigned int __a, vector signed char __b) {
   return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsro(vector unsigned int __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsro(vector unsigned int __a, vector unsigned char __b) {
   return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
                                                      (vector int)__b);
 }
 
-static vector float __ATTRS_o_ai vec_vsro(vector float __a,
-                                          vector signed char __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
+                                                     vector signed char __b) {
   return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
 }
 
-static vector float __ATTRS_o_ai vec_vsro(vector float __a,
-                                          vector unsigned char __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
+                                                     vector unsigned char __b) {
   return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
 }
 
 /* vec_st */
 
-static void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
-                                vector signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
+                                           vector signed char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
-                                signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
+                                           signed char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
-                                vector unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
+                                           vector unsigned char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
-                                unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
+                                           unsigned char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+                                           signed char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+                                           unsigned char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                vector bool char *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+                                           vector bool char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector short __a, int __b, vector short *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
+                                           vector short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
+                                           short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
-                                vector unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
+                                           vector unsigned short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
-                                unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
+                                           unsigned short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector bool short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+                                           short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+                                           unsigned short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                vector bool short *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+                                           vector bool short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector pixel __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+                                           short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
-                                unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+                                           unsigned short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector pixel __a, int __b, vector pixel *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+                                           vector pixel *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector int __a, int __b, vector int *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b,
+                                           vector int *__c) {
   __builtin_altivec_stvx(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b, int *__c) {
   __builtin_altivec_stvx(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
-                                vector unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
+                                           vector unsigned int *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
-                                unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
+                                           unsigned int *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector bool int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+                                           int *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+                                           unsigned int *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                vector bool int *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+                                           vector bool int *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector float __a, int __b, vector float *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
+                                           vector float *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_st(vector float __a, int __b, float *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
+                                           float *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
 /* vec_stvx */
 
-static void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
-                                  vector signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
+                                             vector signed char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
-                                  signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
+                                             signed char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
-                                  vector unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
+                                             vector unsigned char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
-                                  unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
+                                             unsigned char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                  signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+                                             signed char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                  unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+                                             unsigned char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                  vector bool char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+                                             vector bool char *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
-                                  vector short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
+                                             vector short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
+                                             short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
-                                  vector unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
+                                             vector unsigned short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
-                                  unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
+                                             unsigned short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+                                             short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                  unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+                                             unsigned short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                  vector bool short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+                                             vector bool short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+                                             short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                  unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+                                             unsigned short *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                  vector pixel *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+                                             vector pixel *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector int __a, int __b, vector int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
+                                             vector int *__c) {
   __builtin_altivec_stvx(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
+                                             int *__c) {
   __builtin_altivec_stvx(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
-                                  vector unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
+                                             vector unsigned int *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
-                                  unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
+                                             unsigned int *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+                                             int *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                  unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+                                             unsigned int *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                  vector bool int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+                                             vector bool int *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
-                                  vector float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
+                                             vector float *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvx(vector float __a, int __b, float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
+                                             float *__c) {
   __builtin_altivec_stvx((vector int)__a, __b, __c);
 }
 
 /* vec_ste */
 
-static void __ATTRS_o_ai vec_ste(vector signed char __a, int __b,
-                                 signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector signed char __a, int __b,
+                                            signed char *__c) {
   __builtin_altivec_stvebx((vector char)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector unsigned char __a, int __b,
-                                 unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned char __a, int __b,
+                                            unsigned char *__c) {
   __builtin_altivec_stvebx((vector char)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
-                                 signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
+                                            signed char *__c) {
   __builtin_altivec_stvebx((vector char)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
-                                 unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
+                                            unsigned char *__c) {
   __builtin_altivec_stvebx((vector char)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector short __a, int __b,
+                                            short *__c) {
   __builtin_altivec_stvehx(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector unsigned short __a, int __b,
-                                 unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned short __a, int __b,
+                                            unsigned short *__c) {
   __builtin_altivec_stvehx((vector short)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector bool short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
+                                            short *__c) {
   __builtin_altivec_stvehx((vector short)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
-                                 unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
+                                            unsigned short *__c) {
   __builtin_altivec_stvehx((vector short)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector pixel __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
+                                            short *__c) {
   __builtin_altivec_stvehx((vector short)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
-                                 unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
+                                            unsigned short *__c) {
   __builtin_altivec_stvehx((vector short)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector int __a, int __b, int *__c) {
   __builtin_altivec_stvewx(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector unsigned int __a, int __b,
-                                 unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned int __a, int __b,
+                                            unsigned int *__c) {
   __builtin_altivec_stvewx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector bool int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
+                                            int *__c) {
   __builtin_altivec_stvewx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
-                                 unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
+                                            unsigned int *__c) {
   __builtin_altivec_stvewx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_ste(vector float __a, int __b, float *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector float __a, int __b,
+                                            float *__c) {
   __builtin_altivec_stvewx((vector int)__a, __b, __c);
 }
 
 /* vec_stvebx */
 
-static void __ATTRS_o_ai vec_stvebx(vector signed char __a, int __b,
-                                    signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector signed char __a, int __b,
+                                               signed char *__c) {
   __builtin_altivec_stvebx((vector char)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvebx(vector unsigned char __a, int __b,
-                                    unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector unsigned char __a,
+                                               int __b, unsigned char *__c) {
   __builtin_altivec_stvebx((vector char)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
-                                    signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
+                                               signed char *__c) {
   __builtin_altivec_stvebx((vector char)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
-                                    unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
+                                               unsigned char *__c) {
   __builtin_altivec_stvebx((vector char)__a, __b, __c);
 }
 
 /* vec_stvehx */
 
-static void __ATTRS_o_ai vec_stvehx(vector short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector short __a, int __b,
+                                               short *__c) {
   __builtin_altivec_stvehx(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvehx(vector unsigned short __a, int __b,
-                                    unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector unsigned short __a,
+                                               int __b, unsigned short *__c) {
   __builtin_altivec_stvehx((vector short)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
-                                    short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
+                                               short *__c) {
   __builtin_altivec_stvehx((vector short)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
-                                    unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
+                                               unsigned short *__c) {
   __builtin_altivec_stvehx((vector short)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
+                                               short *__c) {
   __builtin_altivec_stvehx((vector short)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
-                                    unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
+                                               unsigned short *__c) {
   __builtin_altivec_stvehx((vector short)__a, __b, __c);
 }
 
 /* vec_stvewx */
 
-static void __ATTRS_o_ai vec_stvewx(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector int __a, int __b,
+                                               int *__c) {
   __builtin_altivec_stvewx(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, int __b,
-                                    unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, int __b,
+                                               unsigned int *__c) {
   __builtin_altivec_stvewx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
+                                               int *__c) {
   __builtin_altivec_stvewx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
-                                    unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
+                                               unsigned int *__c) {
   __builtin_altivec_stvewx((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvewx(vector float __a, int __b, float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector float __a, int __b,
+                                               float *__c) {
   __builtin_altivec_stvewx((vector int)__a, __b, __c);
 }
 
 /* vec_stl */
 
-static void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
-                                 vector signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
+                                            vector signed char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
-                                 signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
+                                            signed char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
-                                 vector unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
+                                            vector unsigned char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
-                                 unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
+                                            unsigned char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                 signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+                                            signed char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                 unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+                                            unsigned char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                 vector bool char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+                                            vector bool char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector short __a, int __b, vector short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
+                                            vector short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
+                                            short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
-                                 vector unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
+                                            vector unsigned short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
-                                 unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
+                                            unsigned short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector bool short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
+                                            short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                 unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
+                                            unsigned short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                 vector bool short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
+                                            vector bool short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
+                                            short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                 unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
+                                            unsigned short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, vector pixel *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
+                                            vector pixel *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector int __a, int __b, vector int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b,
+                                            vector int *__c) {
   __builtin_altivec_stvxl(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) {
   __builtin_altivec_stvxl(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
-                                 vector unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
+                                            vector unsigned int *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
-                                 unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
+                                            unsigned int *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector bool int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
+                                            int *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                 unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
+                                            unsigned int *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                 vector bool int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
+                                            vector bool int *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector float __a, int __b, vector float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
+                                            vector float *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stl(vector float __a, int __b, float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
+                                            float *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
 /* vec_stvxl */
 
-static void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
-                                   vector signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
+                                              vector signed char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
-                                   signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
+                                              signed char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
-                                   vector unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
+                                              vector unsigned char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
-                                   unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
+                                              unsigned char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                   signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+                                              signed char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                   unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+                                              unsigned char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                   vector bool char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+                                              vector bool char *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
-                                   vector short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
+                                              vector short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
+                                              short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector unsigned short __a, int __b,
-                                   vector unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
+                                              int __b,
+                                              vector unsigned short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector unsigned short __a, int __b,
-                                   unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
+                                              int __b, unsigned short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
+                                              short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                   unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
+                                              unsigned short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                   vector bool short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
+                                              vector bool short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
+                                              short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                   unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
+                                              unsigned short *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                   vector pixel *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
+                                              vector pixel *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector int __a, int __b, vector int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
+                                              vector int *__c) {
   __builtin_altivec_stvxl(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
+                                              int *__c) {
   __builtin_altivec_stvxl(__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
-                                   vector unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
+                                              vector unsigned int *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
-                                   unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
+                                              unsigned int *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
+                                              int *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                   unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
+                                              unsigned int *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                   vector bool int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
+                                              vector bool int *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
-                                   vector float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
+                                              vector float *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvxl(vector float __a, int __b, float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
+                                              float *__c) {
   __builtin_altivec_stvxl((vector int)__a, __b, __c);
 }
 
 /* vec_sub */
 
-static vector signed char __ATTRS_o_ai vec_sub(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sub(vector signed char __a, vector signed char __b) {
   return __a - __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_sub(vector bool char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sub(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a - __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_sub(vector signed char __a,
-                                               vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sub(vector signed char __a, vector bool char __b) {
   return __a - (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sub(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sub(vector unsigned char __a, vector unsigned char __b) {
   return __a - __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sub(vector bool char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sub(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a - __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_sub(vector unsigned char __a,
-                                                 vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sub(vector unsigned char __a, vector bool char __b) {
   return __a - (vector unsigned char)__b;
 }
 
-static vector short __ATTRS_o_ai vec_sub(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
+                                                    vector short __b) {
   return __a - __b;
 }
 
-static vector short __ATTRS_o_ai vec_sub(vector bool short __a,
-                                         vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_sub(vector bool short __a,
+                                                    vector short __b) {
   return (vector short)__a - __b;
 }
 
-static vector short __ATTRS_o_ai vec_sub(vector short __a,
-                                         vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
+                                                    vector bool short __b) {
   return __a - (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sub(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sub(vector unsigned short __a, vector unsigned short __b) {
   return __a - __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sub(vector bool short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sub(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a - __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_sub(vector unsigned short __a,
-                                                  vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sub(vector unsigned short __a, vector bool short __b) {
   return __a - (vector unsigned short)__b;
 }
 
-static vector int __ATTRS_o_ai vec_sub(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
+                                                  vector int __b) {
   return __a - __b;
 }
 
-static vector int __ATTRS_o_ai vec_sub(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_sub(vector bool int __a,
+                                                  vector int __b) {
   return (vector int)__a - __b;
 }
 
-static vector int __ATTRS_o_ai vec_sub(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
+                                                  vector bool int __b) {
   return __a - (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sub(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sub(vector unsigned int __a, vector unsigned int __b) {
   return __a - __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sub(vector bool int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sub(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a - __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sub(vector unsigned int __a,
-                                                vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sub(vector unsigned int __a, vector bool int __b) {
   return __a - (vector unsigned int)__b;
 }
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector signed __int128 __ATTRS_o_ai vec_sub(vector signed __int128 __a,
-                                                   vector signed __int128 __b) {
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sub(vector signed __int128 __a, vector signed __int128 __b) {
   return __a - __b;
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
   return __a - __b;
 }
 #endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_sub(vector signed long long __a, vector signed long long __b) {
   return __a - __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_sub(vector unsigned long long __a, vector unsigned long long __b) {
   return __a - __b;
 }
 
-static vector double __ATTRS_o_ai
-vec_sub(vector double __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_sub(vector double __a,
+                                                     vector double __b) {
   return __a - __b;
 }
 #endif
 
-static vector float __ATTRS_o_ai vec_sub(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_sub(vector float __a,
+                                                    vector float __b) {
   return __a - __b;
 }
 
@@ -9314,33 +9471,33 @@
 
 #define __builtin_altivec_vsububm vec_vsububm
 
-static vector signed char __ATTRS_o_ai vec_vsububm(vector signed char __a,
-                                                   vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsububm(vector signed char __a, vector signed char __b) {
   return __a - __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vsububm(vector bool char __a,
-                                                   vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsububm(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a - __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vsububm(vector signed char __a,
-                                                   vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsububm(vector signed char __a, vector bool char __b) {
   return __a - (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsububm(vector unsigned char __a,
-                                                     vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububm(vector unsigned char __a, vector unsigned char __b) {
   return __a - __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsububm(vector bool char __a,
-                                                     vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububm(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a - __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsububm(vector unsigned char __a,
-                                                     vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububm(vector unsigned char __a, vector bool char __b) {
   return __a - (vector unsigned char)__b;
 }
 
@@ -9348,33 +9505,33 @@
 
 #define __builtin_altivec_vsubuhm vec_vsubuhm
 
-static vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
-                                             vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
+                                                        vector short __b) {
   return __a - __b;
 }
 
-static vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a,
-                                             vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a,
+                                                        vector short __b) {
   return (vector short)__a - __b;
 }
 
-static vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
-                                             vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
+                                                        vector bool short __b) {
   return __a - (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vsubuhm(vector unsigned short __a, vector unsigned short __b) {
   return __a - __b;
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vsubuhm(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a - __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsubuhm(vector unsigned short __a,
-                                                      vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector unsigned short __a, vector bool short __b) {
   return __a - (vector unsigned short)__b;
 }
 
@@ -9382,32 +9539,33 @@
 
 #define __builtin_altivec_vsubuwm vec_vsubuwm
 
-static vector int __ATTRS_o_ai vec_vsubuwm(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
+                                                      vector int __b) {
   return __a - __b;
 }
 
-static vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
-                                           vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
+                                                      vector int __b) {
   return (vector int)__a - __b;
 }
 
-static vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
-                                           vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
+                                                      vector bool int __b) {
   return __a - (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsubuwm(vector unsigned int __a,
-                                                    vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuwm(vector unsigned int __a, vector unsigned int __b) {
   return __a - __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
-                                                    vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuwm(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a - __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsubuwm(vector unsigned int __a,
-                                                    vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuwm(vector unsigned int __a, vector bool int __b) {
   return __a - (vector unsigned int)__b;
 }
 
@@ -9415,25 +9573,25 @@
 
 #define __builtin_altivec_vsubfp vec_vsubfp
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vsubfp(vector float __a, vector float __b) {
   return __a - __b;
 }
 
 /* vec_subc */
 
-static vector unsigned int __ATTRS_o_ai vec_subc(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_subc(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vsubcuw(__a, __b);
 }
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
   return __builtin_altivec_vsubcuq(__a, __b);
 }
 
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
   return __builtin_altivec_vsubcuq(__a, __b);
 }
@@ -9441,222 +9599,227 @@
 
 /* vec_vsubcuw */
 
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vsubcuw(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vsubcuw(__a, __b);
 }
 
 /* vec_subs */
 
-static vector signed char __ATTRS_o_ai vec_subs(vector signed char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_subs(vector signed char __a, vector signed char __b) {
   return __builtin_altivec_vsubsbs(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_subs(vector bool char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_subs(vector bool char __a, vector signed char __b) {
   return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_subs(vector signed char __a,
-                                                vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_subs(vector signed char __a, vector bool char __b) {
   return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_subs(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_subs(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vsububs(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_subs(vector bool char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_subs(vector bool char __a, vector unsigned char __b) {
   return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_subs(vector unsigned char __a,
-                                                  vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_subs(vector unsigned char __a, vector bool char __b) {
   return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
 }
 
-static vector short __ATTRS_o_ai vec_subs(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
+                                                     vector short __b) {
   return __builtin_altivec_vsubshs(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_subs(vector bool short __a,
-                                          vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_subs(vector bool short __a,
+                                                     vector short __b) {
   return __builtin_altivec_vsubshs((vector short)__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_subs(vector short __a,
-                                          vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
+                                                     vector bool short __b) {
   return __builtin_altivec_vsubshs(__a, (vector short)__b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_subs(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_subs(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_vsubuhs(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_subs(vector bool short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_subs(vector bool short __a, vector unsigned short __b) {
   return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_subs(vector unsigned short __a,
-                                                   vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_subs(vector unsigned short __a, vector bool short __b) {
   return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
 }
 
-static vector int __ATTRS_o_ai vec_subs(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
+                                                   vector int __b) {
   return __builtin_altivec_vsubsws(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_subs(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_subs(vector bool int __a,
+                                                   vector int __b) {
   return __builtin_altivec_vsubsws((vector int)__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_subs(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
+                                                   vector bool int __b) {
   return __builtin_altivec_vsubsws(__a, (vector int)__b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_subs(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_subs(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vsubuws(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_subs(vector bool int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_subs(vector bool int __a, vector unsigned int __b) {
   return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_subs(vector unsigned int __a,
-                                                 vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_subs(vector unsigned int __a, vector bool int __b) {
   return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
 }
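A minimal usage sketch (illustrative only, not part of the header; the function name is made up): the vec_sub overloads earlier in the file subtract element-wise modulo 2^n, while the vec_subs overloads above saturate instead of wrapping.

    #include <altivec.h>
    void sub_demo(void) {
      vector unsigned char a = vec_splats((unsigned char)3);
      vector unsigned char b = vec_splats((unsigned char)5);
      vector unsigned char wrap = vec_sub(a, b);  /* every lane 254: 3 - 5 wraps mod 256 */
      vector unsigned char sat  = vec_subs(a, b); /* every lane 0: vsububs saturates     */
      (void)wrap; (void)sat;
    }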
 
 /* vec_vsubsbs */
 
-static vector signed char __ATTRS_o_ai vec_vsubsbs(vector signed char __a,
-                                                   vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsubsbs(vector signed char __a, vector signed char __b) {
   return __builtin_altivec_vsubsbs(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vsubsbs(vector bool char __a,
-                                                   vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsubsbs(vector bool char __a, vector signed char __b) {
   return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vsubsbs(vector signed char __a,
-                                                   vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsubsbs(vector signed char __a, vector bool char __b) {
   return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
 }
 
 /* vec_vsububs */
 
-static vector unsigned char __ATTRS_o_ai vec_vsububs(vector unsigned char __a,
-                                                     vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububs(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vsububs(__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsububs(vector bool char __a,
-                                                     vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububs(vector bool char __a, vector unsigned char __b) {
   return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vsububs(vector unsigned char __a,
-                                                     vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububs(vector unsigned char __a, vector bool char __b) {
   return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
 }
 
 /* vec_vsubshs */
 
-static vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
-                                             vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
+                                                        vector short __b) {
   return __builtin_altivec_vsubshs(__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a,
-                                             vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a,
+                                                        vector short __b) {
   return __builtin_altivec_vsubshs((vector short)__a, __b);
 }
 
-static vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
-                                             vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
+                                                        vector bool short __b) {
   return __builtin_altivec_vsubshs(__a, (vector short)__b);
 }
 
 /* vec_vsubuhs */
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vsubuhs(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_vsubuhs(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vsubuhs(vector bool short __a, vector unsigned short __b) {
   return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vsubuhs(vector unsigned short __a,
-                                                      vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector unsigned short __a, vector bool short __b) {
   return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
 }
 
 /* vec_vsubsws */
 
-static vector int __ATTRS_o_ai vec_vsubsws(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
+                                                      vector int __b) {
   return __builtin_altivec_vsubsws(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a,
-                                           vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a,
+                                                      vector int __b) {
   return __builtin_altivec_vsubsws((vector int)__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
-                                           vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
+                                                      vector bool int __b) {
   return __builtin_altivec_vsubsws(__a, (vector int)__b);
 }
 
 /* vec_vsubuws */
 
-static vector unsigned int __ATTRS_o_ai vec_vsubuws(vector unsigned int __a,
-                                                    vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuws(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vsubuws(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsubuws(vector bool int __a,
-                                                    vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuws(vector bool int __a, vector unsigned int __b) {
   return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vsubuws(vector unsigned int __a,
-                                                    vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuws(vector unsigned int __a, vector bool int __b) {
   return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
 }
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
 /* vec_vsubuqm */
 
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) {
   return __a - __b;
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
   return __a - __b;
 }
 
 /* vec_vsubeuqm */
 
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
   return __builtin_altivec_vsubeuqm(__a, __b, __c);
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
              vector unsigned __int128 __c) {
   return __builtin_altivec_vsubeuqm(__a, __b, __c);
@@ -9664,25 +9827,25 @@
 
 /* vec_vsubcuq */
 
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
   return __builtin_altivec_vsubcuq(__a, __b);
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
   return __builtin_altivec_vsubcuq(__a, __b);
 }
 
 /* vec_vsubecuq */
 
-static vector signed __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
   return __builtin_altivec_vsubecuq(__a, __b, __c);
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
              vector unsigned __int128 __c) {
   return __builtin_altivec_vsubecuq(__a, __b, __c);
@@ -9691,38 +9854,38 @@
 
 /* vec_sum4s */
 
-static vector int __ATTRS_o_ai vec_sum4s(vector signed char __a,
-                                         vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed char __a,
+                                                    vector int __b) {
   return __builtin_altivec_vsum4sbs(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai vec_sum4s(vector unsigned char __a,
-                                                  vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sum4s(vector unsigned char __a, vector unsigned int __b) {
   return __builtin_altivec_vsum4ubs(__a, __b);
 }
 
-static vector int __ATTRS_o_ai vec_sum4s(vector signed short __a,
-                                         vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed short __a,
+                                                    vector int __b) {
   return __builtin_altivec_vsum4shs(__a, __b);
 }
 
 /* vec_vsum4sbs */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_vsum4sbs(vector signed char __a, vector int __b) {
   return __builtin_altivec_vsum4sbs(__a, __b);
 }
 
 /* vec_vsum4ubs */
 
-static vector unsigned int __attribute__((__always_inline__))
+static __inline__ vector unsigned int __attribute__((__always_inline__))
 vec_vsum4ubs(vector unsigned char __a, vector unsigned int __b) {
   return __builtin_altivec_vsum4ubs(__a, __b);
 }
 
 /* vec_vsum4shs */
 
-static vector int __attribute__((__always_inline__))
+static __inline__ vector int __attribute__((__always_inline__))
 vec_vsum4shs(vector signed short __a, vector int __b) {
   return __builtin_altivec_vsum4shs(__a, __b);
 }
@@ -9735,7 +9898,7 @@
    programmer wants elements 1 and 3 in both cases, so for little
    endian we must perform some permutes.  */
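A concrete sketch of the cross-element sums described above (illustrative values, assuming the usual vec_sum2s semantics; not part of the header):

    #include <altivec.h>
    void sum2s_demo(void) {
      vector signed int a = {1, 2, 3, 4};
      vector signed int b = {10, 20, 30, 40};
      vector signed int r = vec_sum2s(a, b);
      /* r == {0, 1 + 2 + 20, 0, 3 + 4 + 40} == {0, 23, 0, 47} in element order,
         on big- and little-endian alike thanks to the permutes below. */
      (void)r;
    }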
 
-static vector signed int __attribute__((__always_inline__))
+static __inline__ vector signed int __attribute__((__always_inline__))
 vec_sum2s(vector int __a, vector int __b) {
 #ifdef __LITTLE_ENDIAN__
   vector int __c = (vector signed int)vec_perm(
@@ -9752,7 +9915,7 @@
 
 /* vec_vsum2sws */
 
-static vector signed int __attribute__((__always_inline__))
+static __inline__ vector signed int __attribute__((__always_inline__))
 vec_vsum2sws(vector int __a, vector int __b) {
 #ifdef __LITTLE_ENDIAN__
   vector int __c = (vector signed int)vec_perm(
@@ -9775,7 +9938,7 @@
    wants element 3 in both cases, so for little endian we must perform
    some permutes.  */
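Similarly, a sketch for the full cross-element sum (illustrative values, assuming the usual vec_sums semantics; not part of the header):

    #include <altivec.h>
    void sums_demo(void) {
      vector signed int a = {1, 2, 3, 4};
      vector signed int b = {0, 0, 0, 100};
      vector signed int r = vec_sums(a, b);
      /* r == {0, 0, 0, 1 + 2 + 3 + 4 + 100} == {0, 0, 0, 110}; the add saturates
         on overflow, and element 3 holds the sum on either endianness. */
      (void)r;
    }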
 
-static vector signed int __attribute__((__always_inline__))
+static __inline__ vector signed int __attribute__((__always_inline__))
 vec_sums(vector signed int __a, vector signed int __b) {
 #ifdef __LITTLE_ENDIAN__
   __b = (vector signed int)vec_splat(__b, 3);
@@ -9788,7 +9951,7 @@
 
 /* vec_vsumsws */
 
-static vector signed int __attribute__((__always_inline__))
+static __inline__ vector signed int __attribute__((__always_inline__))
 vec_vsumsws(vector signed int __a, vector signed int __b) {
 #ifdef __LITTLE_ENDIAN__
   __b = (vector signed int)vec_splat(__b, 3);
@@ -9801,8 +9964,7 @@
 
 /* vec_trunc */
 
-static vector float __ATTRS_o_ai
-vec_trunc(vector float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_trunc(vector float __a) {
 #ifdef __VSX__
   return __builtin_vsx_xvrspiz(__a);
 #else
@@ -9811,14 +9973,14 @@
 }
 
 #ifdef __VSX__
-static vector double __ATTRS_o_ai vec_trunc(vector double __a) {
+static __inline__ vector double __ATTRS_o_ai vec_trunc(vector double __a) {
   return __builtin_vsx_xvrdpiz(__a);
 }
 #endif
 
 /* vec_vrfiz */
 
-static vector float __attribute__((__always_inline__))
+static __inline__ vector float __attribute__((__always_inline__))
 vec_vrfiz(vector float __a) {
   return __builtin_altivec_vrfiz(__a);
 }
@@ -9828,7 +9990,8 @@
 /* The vector unpack instructions all have a big-endian bias, so for
    little endian we must reverse the meanings of "high" and "low."  */
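A small sketch of the resulting behaviour (illustrative only, not part of the header): regardless of endianness, vec_unpackh widens the first half of the source vector in element order and vec_unpackl the second half, which is why the "high"/"low" builtins are swapped for little endian below.

    #include <altivec.h>
    void unpack_demo(void) {
      vector signed char c = {0, 1, 2,  3,  4,  5,  6,  7,
                              8, 9, 10, 11, 12, 13, 14, 15};
      vector short hi = vec_unpackh(c); /* {0, 1, 2, 3, 4, 5, 6, 7}, sign-extended */
      vector short lo = vec_unpackl(c); /* {8, 9, 10, 11, 12, 13, 14, 15}          */
      (void)hi; (void)lo;
    }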
 
-static vector short __ATTRS_o_ai vec_unpackh(vector signed char __a) {
+static __inline__ vector short __ATTRS_o_ai
+vec_unpackh(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupklsb((vector char)__a);
 #else
@@ -9836,7 +9999,8 @@
 #endif
 }
 
-static vector bool short __ATTRS_o_ai vec_unpackh(vector bool char __a) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_unpackh(vector bool char __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
 #else
@@ -9844,7 +10008,7 @@
 #endif
 }
 
-static vector int __ATTRS_o_ai vec_unpackh(vector short __a) {
+static __inline__ vector int __ATTRS_o_ai vec_unpackh(vector short __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupklsh(__a);
 #else
@@ -9852,7 +10016,8 @@
 #endif
 }
 
-static vector bool int __ATTRS_o_ai vec_unpackh(vector bool short __a) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_unpackh(vector bool short __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
 #else
@@ -9860,7 +10025,8 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai vec_unpackh(vector pixel __a) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_unpackh(vector pixel __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
 #else
@@ -9869,7 +10035,7 @@
 }
 
 #ifdef __POWER8_VECTOR__
-static vector long long __ATTRS_o_ai vec_unpackh(vector int __a) {
+static __inline__ vector long long __ATTRS_o_ai vec_unpackh(vector int __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupklsw(__a);
 #else
@@ -9877,7 +10043,8 @@
 #endif
 }
 
-static vector bool long long __ATTRS_o_ai vec_unpackh(vector bool int __a) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_unpackh(vector bool int __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
 #else
@@ -9888,7 +10055,8 @@
 
 /* vec_vupkhsb */
 
-static vector short __ATTRS_o_ai vec_vupkhsb(vector signed char __a) {
+static __inline__ vector short __ATTRS_o_ai
+vec_vupkhsb(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupklsb((vector char)__a);
 #else
@@ -9896,7 +10064,8 @@
 #endif
 }
 
-static vector bool short __ATTRS_o_ai vec_vupkhsb(vector bool char __a) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vupkhsb(vector bool char __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
 #else
@@ -9906,7 +10075,7 @@
 
 /* vec_vupkhsh */
 
-static vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) {
+static __inline__ vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupklsh(__a);
 #else
@@ -9914,7 +10083,8 @@
 #endif
 }
 
-static vector bool int __ATTRS_o_ai vec_vupkhsh(vector bool short __a) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vupkhsh(vector bool short __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
 #else
@@ -9922,7 +10092,8 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vupkhsh(vector pixel __a) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vupkhsh(vector pixel __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
 #else
@@ -9933,7 +10104,7 @@
 /* vec_vupkhsw */
 
 #ifdef __POWER8_VECTOR__
-static vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) {
+static __inline__ vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupklsw(__a);
 #else
@@ -9941,7 +10112,8 @@
 #endif
 }
 
-static vector bool long long __ATTRS_o_ai vec_vupkhsw(vector bool int __a) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vupkhsw(vector bool int __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
 #else
@@ -9952,7 +10124,8 @@
 
 /* vec_unpackl */
 
-static vector short __ATTRS_o_ai vec_unpackl(vector signed char __a) {
+static __inline__ vector short __ATTRS_o_ai
+vec_unpackl(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupkhsb((vector char)__a);
 #else
@@ -9960,7 +10133,8 @@
 #endif
 }
 
-static vector bool short __ATTRS_o_ai vec_unpackl(vector bool char __a) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_unpackl(vector bool char __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
 #else
@@ -9968,7 +10142,7 @@
 #endif
 }
 
-static vector int __ATTRS_o_ai vec_unpackl(vector short __a) {
+static __inline__ vector int __ATTRS_o_ai vec_unpackl(vector short __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupkhsh(__a);
 #else
@@ -9976,7 +10150,8 @@
 #endif
 }
 
-static vector bool int __ATTRS_o_ai vec_unpackl(vector bool short __a) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_unpackl(vector bool short __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
 #else
@@ -9984,7 +10159,8 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai vec_unpackl(vector pixel __a) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_unpackl(vector pixel __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
 #else
@@ -9993,7 +10169,7 @@
 }
 
 #ifdef __POWER8_VECTOR__
-static vector long long __ATTRS_o_ai vec_unpackl(vector int __a) {
+static __inline__ vector long long __ATTRS_o_ai vec_unpackl(vector int __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupkhsw(__a);
 #else
@@ -10001,7 +10177,8 @@
 #endif
 }
 
-static vector bool long long __ATTRS_o_ai vec_unpackl(vector bool int __a) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_unpackl(vector bool int __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
 #else
@@ -10012,7 +10189,8 @@
 
 /* vec_vupklsb */
 
-static vector short __ATTRS_o_ai vec_vupklsb(vector signed char __a) {
+static __inline__ vector short __ATTRS_o_ai
+vec_vupklsb(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupkhsb((vector char)__a);
 #else
@@ -10020,7 +10198,8 @@
 #endif
 }
 
-static vector bool short __ATTRS_o_ai vec_vupklsb(vector bool char __a) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vupklsb(vector bool char __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
 #else
@@ -10030,7 +10209,7 @@
 
 /* vec_vupklsh */
 
-static vector int __ATTRS_o_ai vec_vupklsh(vector short __a) {
+static __inline__ vector int __ATTRS_o_ai vec_vupklsh(vector short __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupkhsh(__a);
 #else
@@ -10038,7 +10217,8 @@
 #endif
 }
 
-static vector bool int __ATTRS_o_ai vec_vupklsh(vector bool short __a) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vupklsh(vector bool short __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
 #else
@@ -10046,7 +10226,8 @@
 #endif
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vupklsh(vector pixel __a) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vupklsh(vector pixel __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
 #else
@@ -10057,7 +10238,7 @@
 /* vec_vupklsw */
 
 #ifdef __POWER8_VECTOR__
-static vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) {
+static __inline__ vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) {
 #ifdef __LITTLE_ENDIAN__
   return __builtin_altivec_vupkhsw(__a);
 #else
@@ -10065,7 +10246,8 @@
 #endif
 }
 
-static vector bool long long __ATTRS_o_ai vec_vupklsw(vector bool int __a) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vupklsw(vector bool int __a) {
 #ifdef __LITTLE_ENDIAN__
   return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
 #else
@@ -10078,248 +10260,437 @@
 
 #ifdef __VSX__
 
-static vector signed int __ATTRS_o_ai vec_vsx_ld(int __a,
-                                                 const vector signed int *__b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector bool int *__b) {
+  return (vector bool int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed int *__b) {
   return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector signed int __ATTRS_o_ai
+vec_vsx_ld(int __a, const signed int *__b) {
+  return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_vsx_ld(int __a, const vector unsigned int *__b) {
   return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
 }
 
-static vector float __ATTRS_o_ai vec_vsx_ld(int __a, const vector float *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsx_ld(int __a, const unsigned int *__b) {
+  return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector float *__b) {
   return (vector float)__builtin_vsx_lxvw4x(__a, __b);
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector float __ATTRS_o_ai vec_vsx_ld(int __a,
+                                                       const float *__b) {
+  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vsx_ld(int __a, const vector signed long long *__b) {
   return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vsx_ld(int __a, const vector unsigned long long *__b) {
   return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
 }
 
-static vector double __ATTRS_o_ai vec_vsx_ld(int __a,
-                                             const vector double *__b) {
+static __inline__ vector double __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector double *__b) {
   return (vector double)__builtin_vsx_lxvd2x(__a, __b);
 }
 
+static __inline__ vector double __ATTRS_o_ai
+vec_vsx_ld(int __a, const double *__b) {
+  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector bool short *__b) {
+  return (vector bool short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed short *__b) {
+  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_vsx_ld(int __a, const signed short *__b) {
+  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned short *__b) {
+  return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsx_ld(int __a, const unsigned short *__b) {
+  return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector bool char *__b) {
+  return (vector bool char)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed char *__b) {
+  return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsx_ld(int __a, const signed char *__b) {
+  return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned char *__b) {
+  return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsx_ld(int __a, const unsigned char *__b) {
+  return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
+}
+
 #endif
 
 /* vec_vsx_st */
 
 #ifdef __VSX__
 
-static void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
-                                    vector signed int *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
+                                               vector bool int *__c) {
   __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
-                                    vector unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
+                                               signed int *__c) {
   __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
-                                    vector float *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
+                                               unsigned int *__c) {
   __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_vsx_st(vector signed long long __a, int __b,
-                                    vector signed long long *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
+                                               vector signed int *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
+                                               signed int *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
+                                               vector unsigned int *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
+                                               unsigned int *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
+                                               vector float *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
+                                               float *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed long long __a,
+                                               int __b,
+                                               vector signed long long *__c) {
   __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a, int __b,
-                                    vector unsigned long long *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a,
+                                               int __b,
+                                               vector unsigned long long *__c) {
   __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
 }
 
-static void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
-                                    vector double *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
+                                               vector double *__c) {
   __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
 }
 
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
+                                               double *__c) {
+  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
+                                               vector bool short *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
+                                               signed short *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
+                                               unsigned short *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
+                                               vector signed short *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
+                                               signed short *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
+                                               int __b,
+                                               vector unsigned short *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
+                                               int __b, unsigned short *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
+                                               vector bool char *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
+                                               signed char *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
+                                               unsigned char *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
+                                               vector signed char *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
+                                               signed char *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
+                                               int __b,
+                                               vector unsigned char *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
+                                               int __b, unsigned char *__c) {
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
 #endif
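The overloads added above also accept plain element pointers, not just vector pointers, so a sketch like the following (hypothetical buffers and function name) can use VSX loads and stores on scalar arrays without casts:

    #include <altivec.h>
    void vsx_copy_demo(const float *src, float *dst) {
      vector float v = vec_vsx_ld(0, src); /* lxvw4x load of four floats         */
      vec_vsx_st(v, 0, dst);               /* stxvw4x store back to plain floats */
    }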
 
 /* vec_xor */
 
 #define __builtin_altivec_vxor vec_xor
 
-static vector signed char __ATTRS_o_ai vec_xor(vector signed char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_xor(vector signed char __a, vector signed char __b) {
   return __a ^ __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_xor(vector bool char __a,
-                                               vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_xor(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a ^ __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_xor(vector signed char __a,
-                                               vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_xor(vector signed char __a, vector bool char __b) {
   return __a ^ (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_xor(vector unsigned char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char __a, vector unsigned char __b) {
   return __a ^ __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_xor(vector bool char __a,
-                                                 vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xor(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a ^ __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_xor(vector unsigned char __a,
-                                                 vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char __a, vector bool char __b) {
   return __a ^ (vector unsigned char)__b;
 }
 
-static vector bool char __ATTRS_o_ai vec_xor(vector bool char __a,
-                                             vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_xor(vector bool char __a,
+                                                        vector bool char __b) {
   return __a ^ __b;
 }
 
-static vector short __ATTRS_o_ai vec_xor(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
+                                                    vector short __b) {
   return __a ^ __b;
 }
 
-static vector short __ATTRS_o_ai vec_xor(vector bool short __a,
-                                         vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_xor(vector bool short __a,
+                                                    vector short __b) {
   return (vector short)__a ^ __b;
 }
 
-static vector short __ATTRS_o_ai vec_xor(vector short __a,
-                                         vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
+                                                    vector bool short __b) {
   return __a ^ (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_xor(vector unsigned short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_xor(vector unsigned short __a, vector unsigned short __b) {
   return __a ^ __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_xor(vector bool short __a,
-                                                  vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_xor(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a ^ __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_xor(vector unsigned short __a,
-                                                  vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_xor(vector unsigned short __a, vector bool short __b) {
   return __a ^ (vector unsigned short)__b;
 }
 
-static vector bool short __ATTRS_o_ai vec_xor(vector bool short __a,
-                                              vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_xor(vector bool short __a, vector bool short __b) {
   return __a ^ __b;
 }
 
-static vector int __ATTRS_o_ai vec_xor(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
+                                                  vector int __b) {
   return __a ^ __b;
 }
 
-static vector int __ATTRS_o_ai vec_xor(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_xor(vector bool int __a,
+                                                  vector int __b) {
   return (vector int)__a ^ __b;
 }
 
-static vector int __ATTRS_o_ai vec_xor(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
+                                                  vector bool int __b) {
   return __a ^ (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_xor(vector unsigned int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_xor(vector unsigned int __a, vector unsigned int __b) {
   return __a ^ __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_xor(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a ^ __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_xor(vector unsigned int __a,
-                                                vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_xor(vector unsigned int __a, vector bool int __b) {
   return __a ^ (vector unsigned int)__b;
 }
 
-static vector bool int __ATTRS_o_ai vec_xor(vector bool int __a,
-                                            vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_xor(vector bool int __a,
+                                                       vector bool int __b) {
   return __a ^ __b;
 }
 
-static vector float __ATTRS_o_ai vec_xor(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
+                                                    vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a ^ (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_xor(vector bool int __a,
-                                         vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_xor(vector bool int __a,
+                                                    vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a ^ (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_xor(vector float __a,
-                                         vector bool int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
+                                                    vector bool int __b) {
   vector unsigned int __res =
       (vector unsigned int)__a ^ (vector unsigned int)__b;
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_xor(vector signed long long __a, vector signed long long __b) {
   return __a ^ __b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_xor(vector bool long long __a, vector signed long long __b) {
   return (vector signed long long)__a ^ __b;
 }
 
-static vector signed long long __ATTRS_o_ai vec_xor(vector signed long long __a,
-                                                    vector bool long long __b) {
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_xor(vector signed long long __a, vector bool long long __b) {
   return __a ^ (vector signed long long)__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_xor(vector unsigned long long __a, vector unsigned long long __b) {
   return __a ^ __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_xor(vector bool long long __a, vector unsigned long long __b) {
   return (vector unsigned long long)__a ^ __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_xor(vector unsigned long long __a, vector bool long long __b) {
   return __a ^ (vector unsigned long long)__b;
 }
 
-static vector bool long long __ATTRS_o_ai vec_xor(vector bool long long __a,
-                                                  vector bool long long __b) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector bool long long __b) {
   return __a ^ __b;
 }
 
-static vector double __ATTRS_o_ai
-vec_xor(vector double __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_xor(vector double __a,
+                                                     vector double __b) {
   return (vector double)((vector unsigned long long)__a ^
-                          (vector unsigned long long)__b);
+                         (vector unsigned long long)__b);
 }
 
-static vector double __ATTRS_o_ai
+static __inline__ vector double __ATTRS_o_ai
 vec_xor(vector double __a, vector bool long long __b) {
   return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long) __b);
+                         (vector unsigned long long)__b);
 }
 
-static vector double __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector double __b) {
+static __inline__ vector double __ATTRS_o_ai vec_xor(vector bool long long __a,
+                                                     vector double __b) {
   return (vector double)((vector unsigned long long)__a ^
                          (vector unsigned long long)__b);
 }
@@ -10327,160 +10698,165 @@
 
 /* vec_vxor */
 
-static vector signed char __ATTRS_o_ai vec_vxor(vector signed char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vxor(vector signed char __a, vector signed char __b) {
   return __a ^ __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vxor(vector bool char __a,
-                                                vector signed char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vxor(vector bool char __a, vector signed char __b) {
   return (vector signed char)__a ^ __b;
 }
 
-static vector signed char __ATTRS_o_ai vec_vxor(vector signed char __a,
-                                                vector bool char __b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vxor(vector signed char __a, vector bool char __b) {
   return __a ^ (vector signed char)__b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vxor(vector unsigned char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vxor(vector unsigned char __a, vector unsigned char __b) {
   return __a ^ __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vxor(vector bool char __a,
-                                                  vector unsigned char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vxor(vector bool char __a, vector unsigned char __b) {
   return (vector unsigned char)__a ^ __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_vxor(vector unsigned char __a,
-                                                  vector bool char __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vxor(vector unsigned char __a, vector bool char __b) {
   return __a ^ (vector unsigned char)__b;
 }
 
-static vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a,
-                                              vector bool char __b) {
+static __inline__ vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a,
+                                                         vector bool char __b) {
   return __a ^ __b;
 }
 
-static vector short __ATTRS_o_ai vec_vxor(vector short __a, vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
+                                                     vector short __b) {
   return __a ^ __b;
 }
 
-static vector short __ATTRS_o_ai vec_vxor(vector bool short __a,
-                                          vector short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vxor(vector bool short __a,
+                                                     vector short __b) {
   return (vector short)__a ^ __b;
 }
 
-static vector short __ATTRS_o_ai vec_vxor(vector short __a,
-                                          vector bool short __b) {
+static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
+                                                     vector bool short __b) {
   return __a ^ (vector short)__b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vxor(vector unsigned short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vxor(vector unsigned short __a, vector unsigned short __b) {
   return __a ^ __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vxor(vector bool short __a,
-                                                   vector unsigned short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vxor(vector bool short __a, vector unsigned short __b) {
   return (vector unsigned short)__a ^ __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_vxor(vector unsigned short __a,
-                                                   vector bool short __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vxor(vector unsigned short __a, vector bool short __b) {
   return __a ^ (vector unsigned short)__b;
 }
 
-static vector bool short __ATTRS_o_ai vec_vxor(vector bool short __a,
-                                               vector bool short __b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vxor(vector bool short __a, vector bool short __b) {
   return __a ^ __b;
 }
 
-static vector int __ATTRS_o_ai vec_vxor(vector int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
+                                                   vector int __b) {
   return __a ^ __b;
 }
 
-static vector int __ATTRS_o_ai vec_vxor(vector bool int __a, vector int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vxor(vector bool int __a,
+                                                   vector int __b) {
   return (vector int)__a ^ __b;
 }
 
-static vector int __ATTRS_o_ai vec_vxor(vector int __a, vector bool int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
+                                                   vector bool int __b) {
   return __a ^ (vector int)__b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vxor(vector unsigned int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vxor(vector unsigned int __a, vector unsigned int __b) {
   return __a ^ __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                 vector unsigned int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vxor(vector bool int __a, vector unsigned int __b) {
   return (vector unsigned int)__a ^ __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_vxor(vector unsigned int __a,
-                                                 vector bool int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vxor(vector unsigned int __a, vector bool int __b) {
   return __a ^ (vector unsigned int)__b;
 }
 
-static vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                             vector bool int __b) {
+static __inline__ vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a,
+                                                        vector bool int __b) {
   return __a ^ __b;
 }
 
-static vector float __ATTRS_o_ai vec_vxor(vector float __a, vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
+                                                     vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a ^ (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                          vector float __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vxor(vector bool int __a,
+                                                     vector float __b) {
   vector unsigned int __res =
       (vector unsigned int)__a ^ (vector unsigned int)__b;
   return (vector float)__res;
 }
 
-static vector float __ATTRS_o_ai vec_vxor(vector float __a,
-                                          vector bool int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
+                                                     vector bool int __b) {
   vector unsigned int __res =
       (vector unsigned int)__a ^ (vector unsigned int)__b;
   return (vector float)__res;
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vxor(vector signed long long __a, vector signed long long __b) {
   return __a ^ __b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vxor(vector bool long long __a, vector signed long long __b) {
   return (vector signed long long)__a ^ __b;
 }
 
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_vxor(vector signed long long __a, vector bool long long __b) {
   return __a ^ (vector signed long long)__b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vxor(vector unsigned long long __a, vector unsigned long long __b) {
   return __a ^ __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vxor(vector bool long long __a, vector unsigned long long __b) {
   return (vector unsigned long long)__a ^ __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_vxor(vector unsigned long long __a, vector bool long long __b) {
   return __a ^ (vector unsigned long long)__b;
 }
 
-static vector bool long long __ATTRS_o_ai vec_vxor(vector bool long long __a,
-                                                   vector bool long long __b) {
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vxor(vector bool long long __a, vector bool long long __b) {
   return __a ^ __b;
 }
 #endif
@@ -10489,674 +10865,702 @@
 
 /* vec_extract */
 
-static signed char __ATTRS_o_ai vec_extract(vector signed char __a, int __b) {
+static __inline__ signed char __ATTRS_o_ai vec_extract(vector signed char __a,
+                                                       int __b) {
   return __a[__b];
 }
 
-static unsigned char __ATTRS_o_ai vec_extract(vector unsigned char __a,
-                                              int __b) {
+static __inline__ unsigned char __ATTRS_o_ai
+vec_extract(vector unsigned char __a, int __b) {
   return __a[__b];
 }
 
-static unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
-                                              int __b) {
+static __inline__ unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
+                                                         int __b) {
   return __a[__b];
 }
 
-static signed short __ATTRS_o_ai vec_extract(vector signed short __a, int __b) {
+static __inline__ signed short __ATTRS_o_ai vec_extract(vector signed short __a,
+                                                        int __b) {
   return __a[__b];
 }
 
-static unsigned short __ATTRS_o_ai vec_extract(vector unsigned short __a,
-                                               int __b) {
+static __inline__ unsigned short __ATTRS_o_ai
+vec_extract(vector unsigned short __a, int __b) {
   return __a[__b];
 }
 
-static unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
-                                               int __b) {
+static __inline__ unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
+                                                          int __b) {
   return __a[__b];
 }
 
-static signed int __ATTRS_o_ai vec_extract(vector signed int __a, int __b) {
+static __inline__ signed int __ATTRS_o_ai vec_extract(vector signed int __a,
+                                                      int __b) {
   return __a[__b];
 }
 
-static unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a, int __b) {
+static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a,
+                                                        int __b) {
   return __a[__b];
 }
 
-static unsigned int __ATTRS_o_ai vec_extract(vector bool int __a, int __b) {
+static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector bool int __a,
+                                                        int __b) {
   return __a[__b];
 }
 
 #ifdef __VSX__
-static signed long long __ATTRS_o_ai vec_extract(vector signed long long __a,
-                                                 int __b) {
+static __inline__ signed long long __ATTRS_o_ai
+vec_extract(vector signed long long __a, int __b) {
   return __a[__b];
 }
 
-static unsigned long long __ATTRS_o_ai
+static __inline__ unsigned long long __ATTRS_o_ai
 vec_extract(vector unsigned long long __a, int __b) {
   return __a[__b];
 }
 
-static unsigned long long __ATTRS_o_ai vec_extract(vector bool long long __a,
-                                                   int __b) {
+static __inline__ unsigned long long __ATTRS_o_ai
+vec_extract(vector bool long long __a, int __b) {
   return __a[__b];
 }
 
-static double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
+static __inline__ double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
   return __a[__b];
 }
 #endif
 
-static float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
+static __inline__ float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
   return __a[__b];
 }
 
 /* vec_insert */
 
-static vector signed char __ATTRS_o_ai vec_insert(signed char __a,
-                                                  vector signed char __b,
-                                                  int __c) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_insert(signed char __a, vector signed char __b, int __c) {
   __b[__c] = __a;
   return __b;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_insert(unsigned char __a,
-                                                    vector unsigned char __b,
-                                                    int __c) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_insert(unsigned char __a, vector unsigned char __b, int __c) {
   __b[__c] = __a;
   return __b;
 }
 
-static vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
-                                                vector bool char __b,
-                                                int __c) {
+static __inline__ vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
+                                                           vector bool char __b,
+                                                           int __c) {
   __b[__c] = __a;
   return __b;
 }
 
-static vector signed short __ATTRS_o_ai vec_insert(signed short __a,
-                                                   vector signed short __b,
-                                                   int __c) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_insert(signed short __a, vector signed short __b, int __c) {
   __b[__c] = __a;
   return __b;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_insert(unsigned short __a,
-                                                     vector unsigned short __b,
-                                                     int __c) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_insert(unsigned short __a, vector unsigned short __b, int __c) {
   __b[__c] = __a;
   return __b;
 }
 
-static vector bool short __ATTRS_o_ai vec_insert(unsigned short __a,
-                                                 vector bool short __b,
-                                                 int __c) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_insert(unsigned short __a, vector bool short __b, int __c) {
   __b[__c] = __a;
   return __b;
 }
 
-static vector signed int __ATTRS_o_ai vec_insert(signed int __a,
-                                                 vector signed int __b,
-                                                 int __c) {
+static __inline__ vector signed int __ATTRS_o_ai
+vec_insert(signed int __a, vector signed int __b, int __c) {
   __b[__c] = __a;
   return __b;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_insert(unsigned int __a,
-                                                   vector unsigned int __b,
-                                                   int __c) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_insert(unsigned int __a, vector unsigned int __b, int __c) {
   __b[__c] = __a;
   return __b;
 }
 
-static vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
-                                               vector bool int __b,
-                                               int __c) {
+static __inline__ vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
+                                                          vector bool int __b,
+                                                          int __c) {
   __b[__c] = __a;
   return __b;
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai
+static __inline__ vector signed long long __ATTRS_o_ai
 vec_insert(signed long long __a, vector signed long long __b, int __c) {
   __b[__c] = __a;
   return __b;
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
   __b[__c] = __a;
   return __b;
 }
 
-static vector bool long long __ATTRS_o_ai
+static __inline__ vector bool long long __ATTRS_o_ai
 vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
   __b[__c] = __a;
   return __b;
 }
-static vector double __ATTRS_o_ai vec_insert(double __a, vector double __b,
-                                             int __c) {
+static __inline__ vector double __ATTRS_o_ai vec_insert(double __a,
+                                                        vector double __b,
+                                                        int __c) {
   __b[__c] = __a;
   return __b;
 }
 #endif
 
-static vector float __ATTRS_o_ai vec_insert(float __a, vector float __b,
-                                            int __c) {
+static __inline__ vector float __ATTRS_o_ai vec_insert(float __a,
+                                                       vector float __b,
+                                                       int __c) {
   __b[__c] = __a;
   return __b;
 }
 
 /* vec_lvlx */
 
-static vector signed char __ATTRS_o_ai vec_lvlx(int __a,
-                                                const signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvlx(int __a, const signed char *__b) {
   return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
                   vec_lvsl(__a, __b));
 }
 
-static vector signed char __ATTRS_o_ai vec_lvlx(int __a,
-                                                const vector signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvlx(int __a, const vector signed char *__b) {
   return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned char __ATTRS_o_ai vec_lvlx(int __a,
-                                                  const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvlx(int __a, const unsigned char *__b) {
   return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 vec_lvlx(int __a, const vector unsigned char *__b) {
   return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool char __ATTRS_o_ai vec_lvlx(int __a,
-                                              const vector bool char *__b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvlx(int __a, const vector bool char *__b) {
   return vec_perm(vec_ld(__a, __b), (vector bool char)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector short __ATTRS_o_ai vec_lvlx(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
+                                                     const short *__b) {
   return vec_perm(vec_ld(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
 }
 
-static vector short __ATTRS_o_ai vec_lvlx(int __a, const vector short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
+                                                     const vector short *__b) {
   return vec_perm(vec_ld(__a, __b), (vector short)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned short __ATTRS_o_ai vec_lvlx(int __a,
-                                                   const unsigned short *__b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvlx(int __a, const unsigned short *__b) {
   return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_lvlx(int __a, const vector unsigned short *__b) {
   return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool short __ATTRS_o_ai vec_lvlx(int __a,
-                                               const vector bool short *__b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvlx(int __a, const vector bool short *__b) {
   return vec_perm(vec_ld(__a, __b), (vector bool short)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector pixel __ATTRS_o_ai vec_lvlx(int __a, const vector pixel *__b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_lvlx(int __a,
+                                                     const vector pixel *__b) {
   return vec_perm(vec_ld(__a, __b), (vector pixel)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) {
   return vec_perm(vec_ld(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
 }
 
-static vector int __ATTRS_o_ai vec_lvlx(int __a, const vector int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a,
+                                                   const vector int *__b) {
   return vec_perm(vec_ld(__a, __b), (vector int)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned int __ATTRS_o_ai vec_lvlx(int __a,
-                                                 const unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvlx(int __a, const unsigned int *__b) {
   return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_lvlx(int __a, const vector unsigned int *__b) {
   return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool int __ATTRS_o_ai vec_lvlx(int __a,
-                                             const vector bool int *__b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvlx(int __a, const vector bool int *__b) {
   return vec_perm(vec_ld(__a, __b), (vector bool int)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector float __ATTRS_o_ai vec_lvlx(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
+                                                     const float *__b) {
   return vec_perm(vec_ld(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
 }
 
-static vector float __ATTRS_o_ai vec_lvlx(int __a, const vector float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
+                                                     const vector float *__b) {
   return vec_perm(vec_ld(__a, __b), (vector float)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
 /* vec_lvlxl */
 
-static vector signed char __ATTRS_o_ai vec_lvlxl(int __a,
-                                                 const signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvlxl(int __a, const signed char *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
                   vec_lvsl(__a, __b));
 }
 
-static vector signed char __ATTRS_o_ai
+static __inline__ vector signed char __ATTRS_o_ai
 vec_lvlxl(int __a, const vector signed char *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned char __ATTRS_o_ai vec_lvlxl(int __a,
-                                                   const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvlxl(int __a, const unsigned char *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 vec_lvlxl(int __a, const vector unsigned char *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool char __ATTRS_o_ai vec_lvlxl(int __a,
-                                               const vector bool char *__b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvlxl(int __a, const vector bool char *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector bool char)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector short __ATTRS_o_ai vec_lvlxl(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
+                                                      const short *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
 }
 
-static vector short __ATTRS_o_ai vec_lvlxl(int __a, const vector short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
+                                                      const vector short *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector short)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned short __ATTRS_o_ai vec_lvlxl(int __a,
-                                                    const unsigned short *__b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvlxl(int __a, const unsigned short *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_lvlxl(int __a, const vector unsigned short *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool short __ATTRS_o_ai vec_lvlxl(int __a,
-                                                const vector bool short *__b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvlxl(int __a, const vector bool short *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector bool short)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector pixel __ATTRS_o_ai vec_lvlxl(int __a, const vector pixel *__b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_lvlxl(int __a,
+                                                      const vector pixel *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector pixel)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
 }
 
-static vector int __ATTRS_o_ai vec_lvlxl(int __a, const vector int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a,
+                                                    const vector int *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector int)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned int __ATTRS_o_ai vec_lvlxl(int __a,
-                                                  const unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvlxl(int __a, const unsigned int *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_lvlxl(int __a, const vector unsigned int *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool int __ATTRS_o_ai vec_lvlxl(int __a,
-                                              const vector bool int *__b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvlxl(int __a, const vector bool int *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector bool int)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector float __ATTRS_o_ai vec_lvlxl(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
+                                                      const float *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
 }
 
-static vector float __ATTRS_o_ai vec_lvlxl(int __a, vector float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
+                                                      vector float *__b) {
   return vec_perm(vec_ldl(__a, __b), (vector float)(0),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
 /* vec_lvrx */
 
-static vector signed char __ATTRS_o_ai vec_lvrx(int __a,
-                                                const signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvrx(int __a, const signed char *__b) {
   return vec_perm((vector signed char)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, __b));
 }
 
-static vector signed char __ATTRS_o_ai vec_lvrx(int __a,
-                                                const vector signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvrx(int __a, const vector signed char *__b) {
   return vec_perm((vector signed char)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned char __ATTRS_o_ai vec_lvrx(int __a,
-                                                  const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvrx(int __a, const unsigned char *__b) {
   return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 vec_lvrx(int __a, const vector unsigned char *__b) {
   return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool char __ATTRS_o_ai vec_lvrx(int __a,
-                                              const vector bool char *__b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvrx(int __a, const vector bool char *__b) {
   return vec_perm((vector bool char)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector short __ATTRS_o_ai vec_lvrx(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
+                                                     const short *__b) {
   return vec_perm((vector short)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
 }
 
-static vector short __ATTRS_o_ai vec_lvrx(int __a, const vector short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
+                                                     const vector short *__b) {
   return vec_perm((vector short)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned short __ATTRS_o_ai vec_lvrx(int __a,
-                                                   const unsigned short *__b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvrx(int __a, const unsigned short *__b) {
   return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_lvrx(int __a, const vector unsigned short *__b) {
   return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool short __ATTRS_o_ai vec_lvrx(int __a,
-                                               const vector bool short *__b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvrx(int __a, const vector bool short *__b) {
   return vec_perm((vector bool short)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector pixel __ATTRS_o_ai vec_lvrx(int __a, const vector pixel *__b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_lvrx(int __a,
+                                                     const vector pixel *__b) {
   return vec_perm((vector pixel)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) {
   return vec_perm((vector int)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
 }
 
-static vector int __ATTRS_o_ai vec_lvrx(int __a, const vector int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a,
+                                                   const vector int *__b) {
   return vec_perm((vector int)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned int __ATTRS_o_ai vec_lvrx(int __a,
-                                                 const unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvrx(int __a, const unsigned int *__b) {
   return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_lvrx(int __a, const vector unsigned int *__b) {
   return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool int __ATTRS_o_ai vec_lvrx(int __a,
-                                             const vector bool int *__b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvrx(int __a, const vector bool int *__b) {
   return vec_perm((vector bool int)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector float __ATTRS_o_ai vec_lvrx(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
+                                                     const float *__b) {
   return vec_perm((vector float)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
 }
 
-static vector float __ATTRS_o_ai vec_lvrx(int __a, const vector float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
+                                                     const vector float *__b) {
   return vec_perm((vector float)(0), vec_ld(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
 /* vec_lvrxl */
 
-static vector signed char __ATTRS_o_ai vec_lvrxl(int __a,
-                                                 const signed char *__b) {
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvrxl(int __a, const signed char *__b) {
   return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, __b));
 }
 
-static vector signed char __ATTRS_o_ai
+static __inline__ vector signed char __ATTRS_o_ai
 vec_lvrxl(int __a, const vector signed char *__b) {
   return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned char __ATTRS_o_ai vec_lvrxl(int __a,
-                                                   const unsigned char *__b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvrxl(int __a, const unsigned char *__b) {
   return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 vec_lvrxl(int __a, const vector unsigned char *__b) {
   return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool char __ATTRS_o_ai vec_lvrxl(int __a,
-                                               const vector bool char *__b) {
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvrxl(int __a, const vector bool char *__b) {
   return vec_perm((vector bool char)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector short __ATTRS_o_ai vec_lvrxl(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
+                                                      const short *__b) {
   return vec_perm((vector short)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
 }
 
-static vector short __ATTRS_o_ai vec_lvrxl(int __a, const vector short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
+                                                      const vector short *__b) {
   return vec_perm((vector short)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned short __ATTRS_o_ai vec_lvrxl(int __a,
-                                                    const unsigned short *__b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvrxl(int __a, const unsigned short *__b) {
   return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 vec_lvrxl(int __a, const vector unsigned short *__b) {
   return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool short __ATTRS_o_ai vec_lvrxl(int __a,
-                                                const vector bool short *__b) {
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvrxl(int __a, const vector bool short *__b) {
   return vec_perm((vector bool short)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector pixel __ATTRS_o_ai vec_lvrxl(int __a, const vector pixel *__b) {
+static __inline__ vector pixel __ATTRS_o_ai vec_lvrxl(int __a,
+                                                      const vector pixel *__b) {
   return vec_perm((vector pixel)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) {
   return vec_perm((vector int)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
 }
 
-static vector int __ATTRS_o_ai vec_lvrxl(int __a, const vector int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a,
+                                                    const vector int *__b) {
   return vec_perm((vector int)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector unsigned int __ATTRS_o_ai vec_lvrxl(int __a,
-                                                  const unsigned int *__b) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvrxl(int __a, const unsigned int *__b) {
   return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, __b));
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 vec_lvrxl(int __a, const vector unsigned int *__b) {
   return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector bool int __ATTRS_o_ai vec_lvrxl(int __a,
-                                              const vector bool int *__b) {
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvrxl(int __a, const vector bool int *__b) {
   return vec_perm((vector bool int)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
-static vector float __ATTRS_o_ai vec_lvrxl(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
+                                                      const float *__b) {
   return vec_perm((vector float)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
 }
 
-static vector float __ATTRS_o_ai vec_lvrxl(int __a, const vector float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
+                                                      const vector float *__b) {
   return vec_perm((vector float)(0), vec_ldl(__a, __b),
                   vec_lvsl(__a, (unsigned char *)__b));
 }
 
 /* vec_stvlx */
 
-static void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
-                                   signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
+                                              signed char *__c) {
   return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
-                                   vector signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
+                                              vector signed char *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
-                                   unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
+                                              unsigned char *__c) {
   return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
-                                   vector unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
+                                              vector unsigned char *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b,
-                                   vector bool char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b,
+                                              vector bool char *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
+                                              short *__c) {
   return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
-                                   vector short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
+                                              vector short *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector unsigned short __a, int __b,
-                                   unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
+                                              int __b, unsigned short *__c) {
   return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector unsigned short __a, int __b,
-                                   vector unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
+                                              int __b,
+                                              vector unsigned short *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b,
-                                   vector bool short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b,
+                                              vector bool short *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b,
-                                   vector pixel *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b,
+                                              vector pixel *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
+                                              int *__c) {
   return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector int __a, int __b, vector int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
+                                              vector int *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
-                                   unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
+                                              unsigned int *__c) {
   return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
-                                   vector unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
+                                              vector unsigned int *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b,
-                                   vector bool int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b,
+                                              vector bool int *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlx(vector float __a, int __b,
-                                   vector float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector float __a, int __b,
+                                              vector float *__c) {
   return vec_st(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
@@ -11164,111 +11568,116 @@
 
 /* vec_stvlxl */
 
-static void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
-                                    signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
+                                               signed char *__c) {
   return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
-                                    vector signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
+                                               vector signed char *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a, int __b,
-                                    unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
+                                               int __b, unsigned char *__c) {
   return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a, int __b,
-                                    vector unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
+                                               int __b,
+                                               vector unsigned char *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b,
-                                    vector bool char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b,
+                                               vector bool char *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
+                                               short *__c) {
   return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
-                                    vector short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
+                                               vector short *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a, int __b,
-                                    unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
+                                               int __b, unsigned short *__c) {
   return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a, int __b,
-                                    vector unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
+                                               int __b,
+                                               vector unsigned short *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b,
-                                    vector bool short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b,
+                                               vector bool short *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b,
-                                    vector pixel *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b,
+                                               vector pixel *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
+                                               int *__c) {
   return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b, vector int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
+                                               vector int *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
-                                    unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
+                                               unsigned int *__c) {
   return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
-                                    vector unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
+                                               vector unsigned int *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b,
-                                    vector bool int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b,
+                                               vector bool int *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b,
-                                    vector float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b,
+                                               vector float *__c) {
   return vec_stl(
       vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
@@ -11276,111 +11685,115 @@
 
 /* vec_stvrx */
 
-static void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
-                                   signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
+                                              signed char *__c) {
   return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
-                                   vector signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
+                                              vector signed char *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
-                                   unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
+                                              unsigned char *__c) {
   return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
-                                   vector unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
+                                              vector unsigned char *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b,
-                                   vector bool char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b,
+                                              vector bool char *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
+                                              short *__c) {
   return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
-                                   vector short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
+                                              vector short *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector unsigned short __a, int __b,
-                                   unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
+                                              int __b, unsigned short *__c) {
   return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector unsigned short __a, int __b,
-                                   vector unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
+                                              int __b,
+                                              vector unsigned short *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b,
-                                   vector bool short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b,
+                                              vector bool short *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b,
-                                   vector pixel *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b,
+                                              vector pixel *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
+                                              int *__c) {
   return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector int __a, int __b, vector int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
+                                              vector int *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
-                                   unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
+                                              unsigned int *__c) {
   return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                 __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
-                                   vector unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
+                                              vector unsigned int *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b,
-                                   vector bool int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b,
+                                              vector bool int *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrx(vector float __a, int __b,
-                                   vector float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector float __a, int __b,
+                                              vector float *__c) {
   return vec_st(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
@@ -11388,111 +11801,116 @@
 
 /* vec_stvrxl */
 
-static void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
-                                    signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
+                                               signed char *__c) {
   return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
-                                    vector signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
+                                               vector signed char *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a, int __b,
-                                    unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
+                                               int __b, unsigned char *__c) {
   return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a, int __b,
-                                    vector unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
+                                               int __b,
+                                               vector unsigned char *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b,
-                                    vector bool char *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b,
+                                               vector bool char *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b, short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
+                                               short *__c) {
   return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
-                                    vector short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
+                                               vector short *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a, int __b,
-                                    unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
+                                               int __b, unsigned short *__c) {
   return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a, int __b,
-                                    vector unsigned short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
+                                               int __b,
+                                               vector unsigned short *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b,
-                                    vector bool short *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b,
+                                               vector bool short *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b,
-                                    vector pixel *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b,
+                                               vector pixel *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
+                                               int *__c) {
   return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b, vector int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
+                                               vector int *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
-                                    unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
+                                               unsigned int *__c) {
   return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
                  __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
-                                    vector unsigned int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
+                                               vector unsigned int *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b,
-                                    vector bool int *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b,
+                                               vector bool int *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
 }
 
-static void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
-                                    vector float *__c) {
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
+                                               vector float *__c) {
   return vec_stl(
       vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
       __b, __c);
@@ -11500,45 +11918,47 @@
 
 /* vec_promote */
 
-static vector signed char __ATTRS_o_ai vec_promote(signed char __a, int __b) {
+static __inline__ vector signed char __ATTRS_o_ai vec_promote(signed char __a,
+                                                              int __b) {
   vector signed char __res = (vector signed char)(0);
   __res[__b] = __a;
   return __res;
 }
 
-static vector unsigned char __ATTRS_o_ai vec_promote(unsigned char __a,
-                                                     int __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_promote(unsigned char __a, int __b) {
   vector unsigned char __res = (vector unsigned char)(0);
   __res[__b] = __a;
   return __res;
 }
 
-static vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
+static __inline__ vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
   vector short __res = (vector short)(0);
   __res[__b] = __a;
   return __res;
 }
 
-static vector unsigned short __ATTRS_o_ai vec_promote(unsigned short __a,
-                                                      int __b) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_promote(unsigned short __a, int __b) {
   vector unsigned short __res = (vector unsigned short)(0);
   __res[__b] = __a;
   return __res;
 }
 
-static vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
+static __inline__ vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
   vector int __res = (vector int)(0);
   __res[__b] = __a;
   return __res;
 }
 
-static vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a, int __b) {
+static __inline__ vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a,
+                                                               int __b) {
   vector unsigned int __res = (vector unsigned int)(0);
   __res[__b] = __a;
   return __res;
 }
 
-static vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
+static __inline__ vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
   vector float __res = (vector float)(0);
   __res[__b] = __a;
   return __res;
@@ -11546,56 +11966,63 @@
 
 /* vec_splats */
 
-static vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
+static __inline__ vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
   return (vector signed char)(__a);
 }
 
-static vector unsigned char __ATTRS_o_ai vec_splats(unsigned char __a) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_splats(unsigned char __a) {
   return (vector unsigned char)(__a);
 }
 
-static vector short __ATTRS_o_ai vec_splats(short __a) {
+static __inline__ vector short __ATTRS_o_ai vec_splats(short __a) {
   return (vector short)(__a);
 }
 
-static vector unsigned short __ATTRS_o_ai vec_splats(unsigned short __a) {
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_splats(unsigned short __a) {
   return (vector unsigned short)(__a);
 }
 
-static vector int __ATTRS_o_ai vec_splats(int __a) { return (vector int)(__a); }
+static __inline__ vector int __ATTRS_o_ai vec_splats(int __a) {
+  return (vector int)(__a);
+}
 
-static vector unsigned int __ATTRS_o_ai vec_splats(unsigned int __a) {
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_splats(unsigned int __a) {
   return (vector unsigned int)(__a);
 }
 
 #ifdef __VSX__
-static vector signed long long __ATTRS_o_ai vec_splats(signed long long __a) {
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_splats(signed long long __a) {
   return (vector signed long long)(__a);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_splats(unsigned long long __a) {
   return (vector unsigned long long)(__a);
 }
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector signed __int128 __ATTRS_o_ai vec_splats(signed __int128 __a) {
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_splats(signed __int128 __a) {
   return (vector signed __int128)(__a);
 }
 
-static vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_splats(unsigned __int128 __a) {
   return (vector unsigned __int128)(__a);
 }
 
 #endif
 
-static vector double __ATTRS_o_ai vec_splats(double __a) {
+static __inline__ vector double __ATTRS_o_ai vec_splats(double __a) {
   return (vector double)(__a);
 }
 #endif
 
-static vector float __ATTRS_o_ai vec_splats(float __a) {
+static __inline__ vector float __ATTRS_o_ai vec_splats(float __a) {
   return (vector float)(__a);
 }
 
@@ -11603,168 +12030,177 @@
 
 /* vec_all_eq */
 
-static int __ATTRS_o_ai vec_all_eq(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector pixel __a, vector pixel __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector pixel __a,
+                                              vector pixel __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
                                       (vector int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, (vector long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
                                       (vector long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
                                       (vector long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                   vector long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+                                              vector long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
                                       (vector long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
                                       (vector long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
                                       (vector long long)__b);
 }
 #endif
 
-static int __ATTRS_o_ai vec_all_eq(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b);
 #else
@@ -11773,160 +12209,169 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_all_eq(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b);
 }
 #endif
 
 /* vec_all_ge */
 
-static int __ATTRS_o_ai vec_all_ge(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
                                       (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
                                       (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
                                       (vector unsigned int)__a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, (vector unsigned int)__a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
                                       (vector unsigned int)__a);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a);
 }
-static int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
                                       (vector unsigned long long)__a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b,
                                       (vector unsigned long long)__a);
 }
 
-static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
                                       (vector unsigned long long)__a);
 }
 #endif
 
-static int __ATTRS_o_ai vec_all_ge(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b);
 #else
@@ -11935,160 +12380,169 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_all_ge(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b);
 }
 #endif
 
 /* vec_all_gt */
 
-static int __ATTRS_o_ai vec_all_gt(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, (vector signed char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
                                       (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
                                       (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
                                       __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, (vector unsigned int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
                                       (vector unsigned int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
                                       (vector unsigned int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b);
 }
-static int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a,
                                       (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a,
                                       (vector unsigned long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
                                       (vector unsigned long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
                                       __b);
 }
 
-static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
                                       (vector unsigned long long)__b);
 }
 #endif
 
-static int __ATTRS_o_ai vec_all_gt(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b);
 #else
@@ -12097,168 +12551,177 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_all_gt(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b);
 }
 #endif
 
 /* vec_all_in */
 
-static int __attribute__((__always_inline__))
+static __inline__ int __attribute__((__always_inline__))
 vec_all_in(vector float __a, vector float __b) {
   return __builtin_altivec_vcmpbfp_p(__CR6_EQ, __a, __b);
 }
 
 /* vec_all_le */
 
-static int __ATTRS_o_ai vec_all_le(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, (vector signed char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
                                       (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
                                       (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
                                       __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, (vector unsigned int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
                                       (vector unsigned int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
                                       (vector unsigned int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_all_le(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a,
                                       (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a,
                                       (vector unsigned long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
                                       (vector unsigned long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
                                       __b);
 }
 
-static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
                                       (vector unsigned long long)__b);
 }
 #endif
 
-static int __ATTRS_o_ai vec_all_le(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a);
 #else
@@ -12267,161 +12730,170 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_all_le(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_all_le(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a);
 }
 #endif
 
 /* vec_all_lt */
 
-static int __ATTRS_o_ai vec_all_lt(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
                                       (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
                                       (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
                                       (vector unsigned int)__a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, (vector unsigned int)__a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
                                       (vector unsigned int)__a);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
                                       (vector unsigned long long)__a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b,
                                       (vector unsigned long long)__a);
 }
 
-static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
                                       (vector unsigned long long)__a);
 }
 #endif
 
-static int __ATTRS_o_ai vec_all_lt(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a);
 #else
@@ -12430,14 +12902,15 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_all_lt(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a);
 }
 #endif
 
 /* vec_all_nan */
 
-static int __ATTRS_o_ai vec_all_nan(vector float __a) {
+static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a);
 #else
@@ -12446,176 +12919,185 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_all_nan(vector double __a) {
+static __inline__ int __ATTRS_o_ai vec_all_nan(vector double __a) {
   return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a);
 }
 #endif
 
 /* vec_all_ne */
 
-static int __ATTRS_o_ai vec_all_ne(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector pixel __a, vector pixel __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector pixel __a,
+                                              vector pixel __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
                                       (vector int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector long long)__a,
                                       (vector long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a,
                                       (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
                                       (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
                                       (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
                                       (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
                                       (vector signed long long)__b);
 }
 #endif
 
-static int __ATTRS_o_ai vec_all_ne(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
 #else
@@ -12624,15 +13106,16 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_all_ne(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
 }
 #endif
 
 /* vec_all_nge */
 
-static int __ATTRS_o_ai
-vec_all_nge(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a,
+                                               vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b);
 #else
@@ -12641,16 +13124,16 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai
-vec_all_nge(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_all_nge(vector double __a,
+                                               vector double __b) {
   return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b);
 }
 #endif
 
 /* vec_all_ngt */
 
-static int __ATTRS_o_ai
-vec_all_ngt(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ngt(vector float __a,
+                                               vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b);
 #else
@@ -12659,198 +13142,207 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai
-vec_all_ngt(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_all_ngt(vector double __a,
+                                               vector double __b) {
   return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b);
 }
 #endif
 
 /* vec_all_nle */
 
-static int __attribute__((__always_inline__))
+static __inline__ int __attribute__((__always_inline__))
 vec_all_nle(vector float __a, vector float __b) {
   return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a);
 }
 
 /* vec_all_nlt */
 
-static int __attribute__((__always_inline__))
+static __inline__ int __attribute__((__always_inline__))
 vec_all_nlt(vector float __a, vector float __b) {
   return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a);
 }
 
 /* vec_all_numeric */
 
-static int __attribute__((__always_inline__))
+static __inline__ int __attribute__((__always_inline__))
 vec_all_numeric(vector float __a) {
   return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a);
 }
 
 /* vec_any_eq */
 
-static int __ATTRS_o_ai vec_any_eq(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector pixel __a, vector pixel __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector pixel __a,
+                                              vector pixel __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
                                       (vector int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector long long)__a,
                                       (vector long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a,
                                       (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(
       __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpequd_p(
       __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpequd_p(
       __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(
       __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
 }
 #endif
 
-static int __ATTRS_o_ai vec_any_eq(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b);
 #else
@@ -12859,168 +13351,177 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_any_eq(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b);
 }
 #endif
 
 /* vec_any_ge */
 
-static int __ATTRS_o_ai vec_any_ge(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
                                       (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b,
                                       (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
                                       (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
                                       (vector unsigned int)__a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b,
                                       (vector unsigned int)__a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
                                       (vector unsigned int)__a);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV,
                                       (vector signed long long)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
                                       (vector unsigned long long)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
                                       (vector unsigned long long)__b,
                                       (vector unsigned long long)__a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b,
                                       (vector unsigned long long)__a);
 }
 
-static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
                                       (vector unsigned long long)__b,
                                       (vector unsigned long long)__a);
 }
 #endif
 
-static int __ATTRS_o_ai vec_any_ge(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
 #else
@@ -13029,168 +13530,177 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_any_ge(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
 }
 #endif
 
 /* vec_any_gt */
 
-static int __ATTRS_o_ai vec_any_gt(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a,
                                       (vector signed char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a,
                                       (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
                                       (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
                                       __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
                                       (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
                                       __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a,
                                       (vector unsigned int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
                                       (vector unsigned int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
                                       __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
                                       (vector unsigned int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a,
                                       (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a,
                                       (vector unsigned long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
                                       (vector unsigned long long)__a,
                                       (vector unsigned long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
                                       (vector unsigned long long)__a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
                                       (vector unsigned long long)__a,
                                       (vector unsigned long long)__b);
 }
 #endif
 
-static int __ATTRS_o_ai vec_any_gt(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
 #else
@@ -13199,168 +13709,177 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_any_gt(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
 }
 #endif
 
 /* vec_any_le */
 
-static int __ATTRS_o_ai vec_any_le(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a,
                                       (vector signed char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a,
                                       (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
                                       (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
                                       __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
                                       (vector unsigned char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
                                       __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
                                       (vector unsigned short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a,
                                       (vector unsigned int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
                                       (vector unsigned int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
                                       __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
                                       (vector unsigned int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_any_le(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a,
                                       (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a,
                                       (vector unsigned long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
                                       (vector unsigned long long)__a,
                                       (vector unsigned long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
                                       (vector unsigned long long)__a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
                                       (vector unsigned long long)__a,
                                       (vector unsigned long long)__b);
 }
 #endif
 
-static int __ATTRS_o_ai vec_any_le(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
 #else
@@ -13369,168 +13888,177 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_any_le(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_any_le(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
 }
 #endif
 
 /* vec_any_lt */
 
-static int __ATTRS_o_ai vec_any_lt(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
                                       (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b,
                                       (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
                                       (vector unsigned char)__a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
                                       (vector unsigned short)__a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
                                       __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
                                       (vector unsigned int)__a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b,
                                       (vector unsigned int)__a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
                                       (vector unsigned int)__a);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV,
                                       (vector signed long long)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
                                       (vector unsigned long long)__b, __a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
                                       (vector unsigned long long)__b,
                                       (vector unsigned long long)__a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b,
                                       (vector unsigned long long)__a);
 }
 
-static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
                                       (vector unsigned long long)__b,
                                       (vector unsigned long long)__a);
 }
 #endif
 
-static int __ATTRS_o_ai vec_any_lt(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
 #else
@@ -13539,182 +14067,193 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_any_lt(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
 }
 #endif
 
 /* vec_any_nan */
 
-static int __attribute__((__always_inline__)) vec_any_nan(vector float __a) {
+static __inline__ int __attribute__((__always_inline__))
+vec_any_nan(vector float __a) {
   return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a);
 }
 
 /* vec_any_ne */
 
-static int __ATTRS_o_ai vec_any_ne(vector signed char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector signed char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
-                                   vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                   vector signed char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
+                                              vector signed char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                   vector unsigned char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
+                                              vector unsigned char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool char __a, vector bool char __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
+                                              vector bool char __b) {
   return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
                                       (vector char)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector short __a, vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool short __a, vector short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
+                                              vector short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                   vector unsigned short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
+                                              vector unsigned short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                   vector bool short __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
+                                              vector bool short __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector pixel __a, vector pixel __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector pixel __a,
+                                              vector pixel __b) {
   return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
                                       (vector short)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
-                                   vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool int __a, vector int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
+                                              vector int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                   vector unsigned int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
+                                              vector unsigned int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
                                       (vector int)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool int __a, vector bool int __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
+                                              vector bool int __b) {
   return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
                                       (vector int)__b);
 }
 
 #ifdef __POWER8_VECTOR__
-static int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a,
                                       (vector long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a,
                                       (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(
       __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                   vector signed long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+                                              vector signed long long __b) {
   return __builtin_altivec_vcmpequd_p(
       __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                   vector unsigned long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+                                              vector unsigned long long __b) {
   return __builtin_altivec_vcmpequd_p(
       __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
 }
 
-static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                   vector bool long long __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+                                              vector bool long long __b) {
   return __builtin_altivec_vcmpequd_p(
       __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
 }
 #endif
 
-static int __ATTRS_o_ai vec_any_ne(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector float __a,
+                                              vector float __b) {
 #ifdef __VSX__
   return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
 #else
@@ -13723,49 +14262,50 @@
 }
 
 #ifdef __VSX__
-static int __ATTRS_o_ai vec_any_ne(vector double __a, vector double __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a,
+                                              vector double __b) {
   return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
 }
 #endif
 
 /* vec_any_nge */
 
-static int __attribute__((__always_inline__))
+static __inline__ int __attribute__((__always_inline__))
 vec_any_nge(vector float __a, vector float __b) {
   return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b);
 }
 
 /* vec_any_ngt */
 
-static int __attribute__((__always_inline__))
+static __inline__ int __attribute__((__always_inline__))
 vec_any_ngt(vector float __a, vector float __b) {
   return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b);
 }
 
 /* vec_any_nle */
 
-static int __attribute__((__always_inline__))
+static __inline__ int __attribute__((__always_inline__))
 vec_any_nle(vector float __a, vector float __b) {
   return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a);
 }
 
 /* vec_any_nlt */
 
-static int __attribute__((__always_inline__))
+static __inline__ int __attribute__((__always_inline__))
 vec_any_nlt(vector float __a, vector float __b) {
   return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a);
 }
 
 /* vec_any_numeric */
 
-static int __attribute__((__always_inline__))
+static __inline__ int __attribute__((__always_inline__))
 vec_any_numeric(vector float __a) {
   return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a);
 }
 
 /* vec_any_out */
 
-static int __attribute__((__always_inline__))
+static __inline__ int __attribute__((__always_inline__))
 vec_any_out(vector float __a, vector float __b) {
   return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b);
 }
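
All of the vec_any_* overloads above (and the matching vec_all_* forms) fold a per-lane comparison into a single scalar truth value by testing the CR6 field set by the underlying vcmp*_p builtin. A minimal usage sketch, assuming a PowerPC target compiled with -maltivec; the helper names below are illustrative and not part of the header:

#include <altivec.h>

/* Illustrative only: nonzero if at least one lane of v is negative. */
int any_lane_negative(vector signed int v) {
  return vec_any_lt(v, vec_splats(0));
}

/* Nonzero only if every lane of a exceeds the corresponding lane of b. */
int all_lanes_greater(vector signed int a, vector signed int b) {
  return vec_all_gt(a, b);
}
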
@@ -13790,30 +14330,30 @@
 #define vec_ncipher_be __builtin_altivec_crypto_vncipher
 #define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
 
-static vector unsigned long long __attribute__((__always_inline__))
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
 __builtin_crypto_vsbox(vector unsigned long long __a) {
   return __builtin_altivec_crypto_vsbox(__a);
 }
 
-static vector unsigned long long __attribute__((__always_inline__))
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
 __builtin_crypto_vcipher(vector unsigned long long __a,
                          vector unsigned long long __b) {
   return __builtin_altivec_crypto_vcipher(__a, __b);
 }
 
-static vector unsigned long long __attribute__((__always_inline__))
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
 __builtin_crypto_vcipherlast(vector unsigned long long __a,
                              vector unsigned long long __b) {
   return __builtin_altivec_crypto_vcipherlast(__a, __b);
 }
 
-static vector unsigned long long __attribute__((__always_inline__))
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
 __builtin_crypto_vncipher(vector unsigned long long __a,
                           vector unsigned long long __b) {
   return __builtin_altivec_crypto_vncipher(__a, __b);
 }
 
-static vector unsigned long long __attribute__((__always_inline__))
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
 __builtin_crypto_vncipherlast(vector unsigned long long __a,
                               vector unsigned long long __b) {
   return __builtin_altivec_crypto_vncipherlast(__a, __b);
@@ -13822,20 +14362,20 @@
 #define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
 #define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
 
-#define vec_shasigma_be(X, Y, Z) \
-  _Generic((X), vector unsigned int: __builtin_crypto_vshasigmaw, \
-                vector unsigned long long: __builtin_crypto_vshasigmad) \
-((X), (Y), (Z))
+#define vec_shasigma_be(X, Y, Z)                                               \
+  _Generic((X), vector unsigned int                                            \
+           : __builtin_crypto_vshasigmaw, vector unsigned long long            \
+           : __builtin_crypto_vshasigmad)((X), (Y), (Z))
 #endif
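
The vec_shasigma_be macro reflowed above dispatches on the static type of its first argument with C11 _Generic, so the word and doubleword SHA builtins share one spelling. The same pattern in a self-contained, hypothetical example (plain scalar helpers, not the AltiVec builtins):

#include <stdio.h>

/* Hypothetical stand-ins for the word/doubleword builtins. */
static unsigned int       sigma_word(unsigned int x)        { return x ^ (x >> 3); }
static unsigned long long sigma_dword(unsigned long long x) { return x ^ (x >> 6); }

/* Same shape as vec_shasigma_be: the selector is evaluated only for its
   type, and the chosen function is then called with the argument. */
#define sigma(X) _Generic((X),                       \
                 unsigned int: sigma_word,           \
                 unsigned long long: sigma_dword)(X)

int main(void) {
  printf("%u\n",   sigma(40u));   /* dispatches to sigma_word  */
  printf("%llu\n", sigma(40ull)); /* dispatches to sigma_dword */
  return 0;
}
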
 
 #ifdef __POWER8_VECTOR__
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 __builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b,
                           vector unsigned char __c) {
   return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 __builtin_crypto_vpermxor(vector unsigned short __a, vector unsigned short __b,
                           vector unsigned short __c) {
   return (vector unsigned short)__builtin_altivec_crypto_vpermxor(
@@ -13843,73 +14383,72 @@
       (vector unsigned char)__c);
 }
 
-static vector unsigned int __ATTRS_o_ai __builtin_crypto_vpermxor(
+static __inline__ vector unsigned int __ATTRS_o_ai __builtin_crypto_vpermxor(
     vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
   return (vector unsigned int)__builtin_altivec_crypto_vpermxor(
       (vector unsigned char)__a, (vector unsigned char)__b,
       (vector unsigned char)__c);
 }
 
-static vector unsigned long long __ATTRS_o_ai __builtin_crypto_vpermxor(
-    vector unsigned long long __a, vector unsigned long long __b,
-    vector unsigned long long __c) {
+static __inline__ vector unsigned long long __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned long long __a,
+                          vector unsigned long long __b,
+                          vector unsigned long long __c) {
   return (vector unsigned long long)__builtin_altivec_crypto_vpermxor(
       (vector unsigned char)__a, (vector unsigned char)__b,
       (vector unsigned char)__c);
 }
 
-static vector unsigned char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
 __builtin_crypto_vpmsumb(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_crypto_vpmsumb(__a, __b);
 }
 
-static vector unsigned short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
 __builtin_crypto_vpmsumb(vector unsigned short __a, vector unsigned short __b) {
   return __builtin_altivec_crypto_vpmsumh(__a, __b);
 }
 
-static vector unsigned int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
 __builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_crypto_vpmsumw(__a, __b);
 }
 
-static vector unsigned long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
 __builtin_crypto_vpmsumb(vector unsigned long long __a,
                          vector unsigned long long __b) {
   return __builtin_altivec_crypto_vpmsumd(__a, __b);
 }
 
-static vector signed char __ATTRS_o_ai vec_vgbbd (vector signed char __a)
-{
-  return __builtin_altivec_vgbbd((vector unsigned char) __a);
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vgbbd(vector signed char __a) {
+  return __builtin_altivec_vgbbd((vector unsigned char)__a);
 }
 
 #define vec_pmsum_be __builtin_crypto_vpmsumb
 #define vec_gb __builtin_altivec_vgbbd
 
-static vector unsigned char __ATTRS_o_ai vec_vgbbd (vector unsigned char __a)
-{
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vgbbd(vector unsigned char __a) {
   return __builtin_altivec_vgbbd(__a);
 }
 
-static vector long long __ATTRS_o_ai
-vec_vbpermq (vector signed char __a, vector signed char __b)
-{
-  return __builtin_altivec_vbpermq((vector unsigned char) __a,
-                                   (vector unsigned char) __b);
+static __inline__ vector long long __ATTRS_o_ai
+vec_vbpermq(vector signed char __a, vector signed char __b) {
+  return __builtin_altivec_vbpermq((vector unsigned char)__a,
+                                   (vector unsigned char)__b);
 }
 
-static vector long long __ATTRS_o_ai
-vec_vbpermq (vector unsigned char __a, vector unsigned char __b)
-{
+static __inline__ vector long long __ATTRS_o_ai
+vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
   return __builtin_altivec_vbpermq(__a, __b);
 }
 
 #ifdef __powerpc64__
-static vector unsigned long long __attribute__((__always_inline__))
-vec_bperm (vector unsigned __int128 __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char) __a,
-                                   (vector unsigned char) __b);
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
+  return __builtin_altivec_vbpermq((vector unsigned char)__a,
+                                   (vector unsigned char)__b);
 }
 #endif
 #endif
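
For the POWER8-only block that closes this file, vec_vgbbd/vec_gb and vec_vbpermq are thin wrappers over the vgbbd and vbpermq builtins. A hedged usage sketch, assuming -mcpu=power8 or newer with AltiVec enabled; the wrapper names are illustrative:

#include <altivec.h>

/* Illustrative wrapper: vec_vgbbd gathers bits by bytes within each
   doubleword of the input. */
vector unsigned char gather_bits(vector unsigned char v) {
  return vec_vgbbd(v);
}

/* vec_vbpermq permutes bits of the quadword in data using the bit indices
   held in idx; the result type matches the declaration above. */
vector long long bit_permute(vector unsigned char data, vector unsigned char idx) {
  return vec_vbpermq(data, idx);
}
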
diff --git a/renderscript/clang-include/ammintrin.h b/renderscript/clang-include/ammintrin.h
index 4880fd7..8985bb4 100644
--- a/renderscript/clang-include/ammintrin.h
+++ b/renderscript/clang-include/ammintrin.h
@@ -38,9 +38,7 @@
 /// __m128i _mm_extracti_si64(__m128i x, const int len, const int idx);
 /// \endcode
 ///
-/// \code
 /// This intrinsic corresponds to the \c EXTRQ instruction.
-/// \endcode
 ///
 /// \param x
 ///    The value from which bits are extracted.
@@ -49,10 +47,10 @@
 ///    are zero, the length is interpreted as 64.
 /// \param idx
 ///    Bits [5:0] specify the index of the least significant bit; the other
-///    bits are ignored. If the sum of the index and length is greater than
-///    64, the result is undefined. If the length and index are both zero,
-///    bits [63:0] of parameter x are extracted. If the length is zero
-///    but the index is non-zero, the result is undefined.
+///    bits are ignored. If the sum of the index and length is greater than 64,
+///    the result is undefined. If the length and index are both zero, bits
+///    [63:0] of parameter x are extracted. If the length is zero but the index
+///    is non-zero, the result is undefined.
 /// \returns A 128-bit integer vector whose lower 64 bits contain the bits
 ///    extracted from the source operand.
 #define _mm_extracti_si64(x, len, idx) \
@@ -64,20 +62,17 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
-/// \code
 /// This intrinsic corresponds to the \c EXTRQ instruction.
-/// \endcode
 ///
 /// \param __x
 ///    The value from which bits are extracted.
 /// \param __y
-///    Specifies the index of the least significant bit at [13:8]
-///    and the length at [5:0]; all other bits are ignored.
-///    If bits [5:0] are zero, the length is interpreted as 64.
-///    If the sum of the index and length is greater than 64, the result is
-///    undefined. If the length and index are both zero, bits [63:0] of
-///    parameter __x are extracted. If the length is zero but the index is
-///    non-zero, the result is undefined.
+///    Specifies the index of the least significant bit at [13:8] and the
+///    length at [5:0]; all other bits are ignored. If bits [5:0] are zero, the
+///    length is interpreted as 64. If the sum of the index and length is
+///    greater than 64, the result is undefined. If the length and index are
+///    both zero, bits [63:0] of parameter __x are extracted. If the length is
+///    zero but the index is non-zero, the result is undefined.
 /// \returns A 128-bit vector whose lower 64 bits contain the bits extracted
 ///    from the source operand.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -86,9 +81,9 @@
   return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
 }
 
-/// \brief Inserts bits of a specified length from the source integer vector
-///    y into the lower 64 bits of the destination integer vector x at the
-///    index idx and of the length len.
+/// \brief Inserts bits of a specified length from the source integer vector y
+///    into the lower 64 bits of the destination integer vector x at the index
+///    idx and of the length len.
 ///
 /// \headerfile <x86intrin.h>
 ///
@@ -97,9 +92,7 @@
 /// const int idx);
 /// \endcode
 ///
-/// \code
 /// This intrinsic corresponds to the \c INSERTQ instruction.
-/// \endcode
 ///
 /// \param x
 ///    The destination operand where bits will be inserted. The inserted bits
@@ -113,14 +106,14 @@
 ///    are zero, the length is interpreted as 64.
 /// \param idx
 ///    Bits [5:0] specify the index of the least significant bit; the other
-///    bits are ignored. If the sum of the index and length is greater than
-///    64, the result is undefined. If the length and index are both zero,
-///    bits [63:0] of parameter y are inserted into parameter x. If the
-///    length is zero but the index is non-zero, the result is undefined.
-/// \returns A 128-bit integer vector containing the original lower 64-bits
-///    of destination operand x with the specified bitfields replaced by the
-///    lower bits of source operand y. The upper 64 bits of the return value
-///    are undefined.
+///    bits are ignored. If the sum of the index and length is greater than 64,
+///    the result is undefined. If the length and index are both zero, bits
+///    [63:0] of parameter y are inserted into parameter x. If the length is
+///    zero but the index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64-bits of
+///    destination operand x with the specified bitfields replaced by the lower
+///    bits of source operand y. The upper 64 bits of the return value are
+///    undefined.
 
 #define _mm_inserti_si64(x, y, len, idx) \
   ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \
@@ -128,14 +121,12 @@
                                     (char)(len), (char)(idx)))
 
 /// \brief Inserts bits of a specified length from the source integer vector
-///    __y into the lower 64 bits of the destination integer vector __x at
-///    the index and of the length specified by __y.
+///    __y into the lower 64 bits of the destination integer vector __x at the
+///    index and of the length specified by __y.
 ///
 /// \headerfile <x86intrin.h>
 ///
-/// \code
 /// This intrinsic corresponds to the \c INSERTQ instruction.
-/// \endcode
 ///
 /// \param __x
 ///    The destination operand where bits will be inserted. The inserted bits
@@ -145,14 +136,14 @@
 ///    The source operand containing the bits to be extracted. The extracted
 ///    bits are the least significant bits of operand __y with length specified
 ///    by bits [69:64]. These are inserted into the destination at the index
-///    specified by bits [77:72]; all other bits are ignored.
-///    If bits [69:64] are zero, the length is interpreted as 64.
-///    If the sum of the index and length is greater than 64, the result is
-///    undefined. If the length and index are both zero, bits [63:0] of
-///    parameter __y are inserted into parameter __x. If the length
-///    is zero but the index is non-zero, the result is undefined.
-/// \returns A 128-bit integer vector containing the original lower 64-bits
-///    of destination operand __x with the specified bitfields replaced by the
+///    specified by bits [77:72]; all other bits are ignored. If bits [69:64]
+///    are zero, the length is interpreted as 64. If the sum of the index and
+///    length is greater than 64, the result is undefined. If the length and
+///    index are both zero, bits [63:0] of parameter __y are inserted into
+///    parameter __x. If the length is zero but the index is non-zero, the
+///    result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64-bits of
+///    destination operand __x with the specified bitfields replaced by the
 ///    lower bits of source operand __y. The upper 64 bits of the return value
 ///    are undefined.
 
@@ -168,15 +159,12 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
-/// \code
 /// This intrinsic corresponds to the \c MOVNTSD instruction.
-/// \endcode
 ///
 /// \param __p
 ///    The 64-bit memory location used to store the register value.
 /// \param __a
-///    The 64-bit double-precision floating-point register value to
-///    be stored.
+///    The 64-bit double-precision floating-point register value to be stored.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_stream_sd(double *__p, __m128d __a)
 {
@@ -189,15 +177,12 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
-/// \code
 /// This intrinsic corresponds to the \c MOVNTSS instruction.
-/// \endcode
 ///
 /// \param __p
 ///    The 32-bit memory location used to store the register value.
 /// \param __a
-///    The 32-bit single-precision floating-point register value to
-///    be stored.
+///    The 32-bit single-precision floating-point register value to be stored.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_stream_ss(float *__p, __m128 __a)
 {
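
The cleaned-up EXTRQ/INSERTQ documentation above already spells out the length/index encoding, so a usage sketch is short. Assuming an SSE4a-capable target (e.g. built with -msse4a); the helper names are hypothetical:

#include <x86intrin.h>

/* Extract the 8 bits starting at bit index 0 of the low 64 bits of x. */
__m128i extract_low_byte(__m128i x) {
  return _mm_extracti_si64(x, 8, 0);
}

/* Replace bits [7:0] of the low 64 bits of x with bits [7:0] of y. */
__m128i insert_low_byte(__m128i x, __m128i y) {
  return _mm_inserti_si64(x, y, 8, 0);
}
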
diff --git a/renderscript/clang-include/arm_acle.h b/renderscript/clang-include/arm_acle.h
index 4be1d09..8423e62 100644
--- a/renderscript/clang-include/arm_acle.h
+++ b/renderscript/clang-include/arm_acle.h
@@ -72,9 +72,11 @@
 
 /* 8.5 Swap */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __swp(uint32_t x, volatile uint32_t *p) {
+__swp(uint32_t __x, volatile uint32_t *__p) {
   uint32_t v;
-  do v = __builtin_arm_ldrex(p); while (__builtin_arm_strex(x, p));
+  do
+    v = __builtin_arm_ldrex(__p);
+  while (__builtin_arm_strex(__x, __p));
   return v;
 }
 
@@ -110,113 +112,115 @@
 /* 9.2 Miscellaneous data-processing intrinsics */
 /* ROR */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __ror(uint32_t x, uint32_t y) {
-  y %= 32;
-  if (y == 0)  return x;
-  return (x >> y) | (x << (32 - y));
+__ror(uint32_t __x, uint32_t __y) {
+  __y %= 32;
+  if (__y == 0)
+    return __x;
+  return (__x >> __y) | (__x << (32 - __y));
 }
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-  __rorll(uint64_t x, uint32_t y) {
-  y %= 64;
-  if (y == 0)  return x;
-  return (x >> y) | (x << (64 - y));
+__rorll(uint64_t __x, uint32_t __y) {
+  __y %= 64;
+  if (__y == 0)
+    return __x;
+  return (__x >> __y) | (__x << (64 - __y));
 }
 
 static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-  __rorl(unsigned long x, uint32_t y) {
+__rorl(unsigned long __x, uint32_t __y) {
 #if __SIZEOF_LONG__ == 4
-  return __ror(x, y);
+  return __ror(__x, __y);
 #else
-  return __rorll(x, y);
+  return __rorll(__x, __y);
 #endif
 }
 
 
 /* CLZ */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __clz(uint32_t t) {
-  return __builtin_clz(t);
+__clz(uint32_t __t) {
+  return __builtin_clz(__t);
 }
 
 static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-  __clzl(unsigned long t) {
-  return __builtin_clzl(t);
+__clzl(unsigned long __t) {
+  return __builtin_clzl(__t);
 }
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-  __clzll(uint64_t t) {
-  return __builtin_clzll(t);
+__clzll(uint64_t __t) {
+  return __builtin_clzll(__t);
 }
 
 /* REV */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __rev(uint32_t t) {
-  return __builtin_bswap32(t);
+__rev(uint32_t __t) {
+  return __builtin_bswap32(__t);
 }
 
 static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-  __revl(unsigned long t) {
+__revl(unsigned long __t) {
 #if __SIZEOF_LONG__ == 4
-  return __builtin_bswap32(t);
+  return __builtin_bswap32(__t);
 #else
-  return __builtin_bswap64(t);
+  return __builtin_bswap64(__t);
 #endif
 }
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-  __revll(uint64_t t) {
-  return __builtin_bswap64(t);
+__revll(uint64_t __t) {
+  return __builtin_bswap64(__t);
 }
 
 /* REV16 */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __rev16(uint32_t t) {
-  return __ror(__rev(t), 16);
+__rev16(uint32_t __t) {
+  return __ror(__rev(__t), 16);
 }
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-  __rev16ll(uint64_t t) {
-  return (((uint64_t)__rev16(t >> 32)) << 32) | __rev16(t);
+__rev16ll(uint64_t __t) {
+  return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
 }
 
 static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-  __rev16l(unsigned long t) {
+__rev16l(unsigned long __t) {
 #if __SIZEOF_LONG__ == 4
-    return __rev16(t);
+    return __rev16(__t);
 #else
-    return __rev16ll(t);
+    return __rev16ll(__t);
 #endif
 }
 
 /* REVSH */
 static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
-  __revsh(int16_t t) {
-  return __builtin_bswap16(t);
+__revsh(int16_t __t) {
+  return __builtin_bswap16(__t);
 }
 
 /* RBIT */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __rbit(uint32_t t) {
-  return __builtin_arm_rbit(t);
+__rbit(uint32_t __t) {
+  return __builtin_arm_rbit(__t);
 }
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-  __rbitll(uint64_t t) {
+__rbitll(uint64_t __t) {
 #if __ARM_32BIT_STATE
-  return (((uint64_t) __builtin_arm_rbit(t)) << 32) |
-    __builtin_arm_rbit(t >> 32);
+  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
+         __builtin_arm_rbit(__t >> 32);
 #else
-  return __builtin_arm_rbit64(t);
+  return __builtin_arm_rbit64(__t);
 #endif
 }
 
 static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-  __rbitl(unsigned long t) {
+__rbitl(unsigned long __t) {
 #if __SIZEOF_LONG__ == 4
-  return __rbit(t);
+  return __rbit(__t);
 #else
-  return __rbitll(t);
+  return __rbitll(__t);
 #endif
 }
 
@@ -235,61 +239,61 @@
 /* 9.4.2 Saturating addition and subtraction intrinsics */
 #if __ARM_32BIT_STATE
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-  __qadd(int32_t t, int32_t v) {
-  return __builtin_arm_qadd(t, v);
+__qadd(int32_t __t, int32_t __v) {
+  return __builtin_arm_qadd(__t, __v);
 }
 
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-  __qsub(int32_t t, int32_t v) {
-  return __builtin_arm_qsub(t, v);
+__qsub(int32_t __t, int32_t __v) {
+  return __builtin_arm_qsub(__t, __v);
 }
 
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__qdbl(int32_t t) {
-  return __builtin_arm_qadd(t, t);
+__qdbl(int32_t __t) {
+  return __builtin_arm_qadd(__t, __t);
 }
 #endif
 
 /* 9.7 CRC32 intrinsics */
 #if __ARM_FEATURE_CRC32
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __crc32b(uint32_t a, uint8_t b) {
-  return __builtin_arm_crc32b(a, b);
+__crc32b(uint32_t __a, uint8_t __b) {
+  return __builtin_arm_crc32b(__a, __b);
 }
 
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __crc32h(uint32_t a, uint16_t b) {
-  return __builtin_arm_crc32h(a, b);
+__crc32h(uint32_t __a, uint16_t __b) {
+  return __builtin_arm_crc32h(__a, __b);
 }
 
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __crc32w(uint32_t a, uint32_t b) {
-  return __builtin_arm_crc32w(a, b);
+__crc32w(uint32_t __a, uint32_t __b) {
+  return __builtin_arm_crc32w(__a, __b);
 }
 
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __crc32d(uint32_t a, uint64_t b) {
-  return __builtin_arm_crc32d(a, b);
+__crc32d(uint32_t __a, uint64_t __b) {
+  return __builtin_arm_crc32d(__a, __b);
 }
 
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __crc32cb(uint32_t a, uint8_t b) {
-  return __builtin_arm_crc32cb(a, b);
+__crc32cb(uint32_t __a, uint8_t __b) {
+  return __builtin_arm_crc32cb(__a, __b);
 }
 
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __crc32ch(uint32_t a, uint16_t b) {
-  return __builtin_arm_crc32ch(a, b);
+__crc32ch(uint32_t __a, uint16_t __b) {
+  return __builtin_arm_crc32ch(__a, __b);
 }
 
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __crc32cw(uint32_t a, uint32_t b) {
-  return __builtin_arm_crc32cw(a, b);
+__crc32cw(uint32_t __a, uint32_t __b) {
+  return __builtin_arm_crc32cw(__a, __b);
 }
 
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-  __crc32cd(uint32_t a, uint64_t b) {
-  return __builtin_arm_crc32cd(a, b);
+__crc32cd(uint32_t __a, uint64_t __b) {
+  return __builtin_arm_crc32cd(__a, __b);
 }
 #endif
 
diff --git a/renderscript/clang-include/avx2intrin.h b/renderscript/clang-include/avx2intrin.h
index f786572..13bcbef 100644
--- a/renderscript/clang-include/avx2intrin.h
+++ b/renderscript/clang-include/avx2intrin.h
@@ -32,7 +32,9 @@
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx2")))
 
 /* SSE4 Multiple Packed Sums of Absolute Difference.  */
-#define _mm256_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw256((X), (Y), (M))
+#define _mm256_mpsadbw_epu8(X, Y, M) \
+  (__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
+                                     (__v32qi)(__m256i)(Y), (int)(M))
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_abs_epi8(__m256i __a)
@@ -79,25 +81,25 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_add_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)((__v32qi)__a + (__v32qi)__b);
+  return (__m256i)((__v32qu)__a + (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_add_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)((__v16hi)__a + (__v16hi)__b);
+  return (__m256i)((__v16hu)__a + (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_add_epi32(__m256i __a, __m256i __b)
 {
-  return (__m256i)((__v8si)__a + (__v8si)__b);
+  return (__m256i)((__v8su)__a + (__v8su)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_add_epi64(__m256i __a, __m256i __b)
 {
-  return __a + __b;
+  return (__m256i)((__v4du)__a + (__v4du)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -131,13 +133,13 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_and_si256(__m256i __a, __m256i __b)
 {
-  return __a & __b;
+  return (__m256i)((__v4du)__a & (__v4du)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_andnot_si256(__m256i __a, __m256i __b)
 {
-  return ~__a & __b;
+  return (__m256i)(~(__v4du)__a & (__v4du)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -200,7 +202,7 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cmpeq_epi64(__m256i __a, __m256i __b)
 {
-  return (__m256i)(__a == __b);
+  return (__m256i)((__v4di)__a == (__v4di)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -226,7 +228,7 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cmpgt_epi64(__m256i __a, __m256i __b)
 {
-  return (__m256i)(__a > __b);
+  return (__m256i)((__v4di)__a > (__v4di)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -358,73 +360,79 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepi8_epi16(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovsxbw256((__v16qi)__V);
+  /* This function always performs a signed extension, but __v16qi is a char
+     which may be signed or unsigned, so use __v16qs. */
+  return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepi8_epi32(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovsxbd256((__v16qi)__V);
+  /* This function always performs a signed extension, but __v16qi is a char
+     which may be signed or unsigned, so use __v16qs. */
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepi8_epi64(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovsxbq256((__v16qi)__V);
+  /* This function always performs a signed extension, but __v16qi is a char
+     which may be signed or unsigned, so use __v16qs. */
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepi16_epi32(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovsxwd256((__v8hi)__V);
+  return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepi16_epi64(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovsxwq256((__v8hi)__V);
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepi32_epi64(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovsxdq256((__v4si)__V);
+  return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepu8_epi16(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovzxbw256((__v16qi)__V);
+  return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepu8_epi32(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovzxbd256((__v16qi)__V);
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepu8_epi64(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovzxbq256((__v16qi)__V);
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepu16_epi32(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovzxwd256((__v8hi)__V);
+  return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepu16_epi64(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovzxwq256((__v8hi)__V);
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtepu32_epi64(__m128i __V)
 {
-  return (__m256i)__builtin_ia32_pmovzxdq256((__v4si)__V);
+  return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
 }
 
 static __inline__  __m256i __DEFAULT_FN_ATTRS
@@ -454,13 +462,13 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_mullo_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)((__v16hi)__a * (__v16hi)__b);
+  return (__m256i)((__v16hu)__a * (__v16hu)__b);
 }
 
 static __inline__  __m256i __DEFAULT_FN_ATTRS
 _mm256_mullo_epi32 (__m256i __a, __m256i __b)
 {
-  return (__m256i)((__v8si)__a * (__v8si)__b);
+  return (__m256i)((__v8su)__a * (__v8su)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -472,7 +480,7 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_or_si256(__m256i __a, __m256i __b)
 {
-  return __a | __b;
+  return (__m256i)((__v4du)__a | (__v4du)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -489,38 +497,42 @@
 
 #define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
   (__m256i)__builtin_shufflevector((__v8si)(__m256i)(a), \
-                                   (__v8si)_mm256_setzero_si256(), \
-                                   (imm) & 0x3, ((imm) & 0xc) >> 2, \
-                                   ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
-                                   4 + (((imm) & 0x03) >> 0), \
-                                   4 + (((imm) & 0x0c) >> 2), \
-                                   4 + (((imm) & 0x30) >> 4), \
-                                   4 + (((imm) & 0xc0) >> 6)); })
+                                   (__v8si)_mm256_undefined_si256(), \
+                                   0 + (((imm) >> 0) & 0x3), \
+                                   0 + (((imm) >> 2) & 0x3), \
+                                   0 + (((imm) >> 4) & 0x3), \
+                                   0 + (((imm) >> 6) & 0x3), \
+                                   4 + (((imm) >> 0) & 0x3), \
+                                   4 + (((imm) >> 2) & 0x3), \
+                                   4 + (((imm) >> 4) & 0x3), \
+                                   4 + (((imm) >> 6) & 0x3)); })
 
 #define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
   (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
-                                   (__v16hi)_mm256_setzero_si256(), \
+                                   (__v16hi)_mm256_undefined_si256(), \
                                    0, 1, 2, 3, \
-                                   4 + (((imm) & 0x03) >> 0), \
-                                   4 + (((imm) & 0x0c) >> 2), \
-                                   4 + (((imm) & 0x30) >> 4), \
-                                   4 + (((imm) & 0xc0) >> 6), \
+                                   4  + (((imm) >> 0) & 0x3), \
+                                   4  + (((imm) >> 2) & 0x3), \
+                                   4  + (((imm) >> 4) & 0x3), \
+                                   4  + (((imm) >> 6) & 0x3), \
                                    8, 9, 10, 11, \
-                                   12 + (((imm) & 0x03) >> 0), \
-                                   12 + (((imm) & 0x0c) >> 2), \
-                                   12 + (((imm) & 0x30) >> 4), \
-                                   12 + (((imm) & 0xc0) >> 6)); })
+                                   12 + (((imm) >> 0) & 0x3), \
+                                   12 + (((imm) >> 2) & 0x3), \
+                                   12 + (((imm) >> 4) & 0x3), \
+                                   12 + (((imm) >> 6) & 0x3)); })
 
 #define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
   (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
-                                   (__v16hi)_mm256_setzero_si256(), \
-                                   (imm) & 0x3,((imm) & 0xc) >> 2, \
-                                   ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+                                   (__v16hi)_mm256_undefined_si256(), \
+                                   0 + (((imm) >> 0) & 0x3), \
+                                   0 + (((imm) >> 2) & 0x3), \
+                                   0 + (((imm) >> 4) & 0x3), \
+                                   0 + (((imm) >> 6) & 0x3), \
                                    4, 5, 6, 7, \
-                                   8 + (((imm) & 0x03) >> 0), \
-                                   8 + (((imm) & 0x0c) >> 2), \
-                                   8 + (((imm) & 0x30) >> 4), \
-                                   8 + (((imm) & 0xc0) >> 6), \
+                                   8 + (((imm) >> 0) & 0x3), \
+                                   8 + (((imm) >> 2) & 0x3), \
+                                   8 + (((imm) >> 4) & 0x3), \
+                                   8 + (((imm) >> 6) & 0x3), \
                                    12, 13, 14, 15); })
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -541,8 +553,42 @@
     return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
 }
 
-#define _mm256_slli_si256(a, count) __extension__ ({ \
-  (__m256i)__builtin_ia32_pslldqi256((__m256i)(a), (count)*8); })
+#define _mm256_slli_si256(a, imm) __extension__ ({ \
+  (__m256i)__builtin_shufflevector(                                          \
+        (__v32qi)_mm256_setzero_si256(),                                     \
+        (__v32qi)(__m256i)(a),                                               \
+        ((char)(imm)&0xF0) ?  0 : ((char)(imm)>0x0 ? 16 : 32) - (char)(imm), \
+        ((char)(imm)&0xF0) ?  1 : ((char)(imm)>0x1 ? 17 : 33) - (char)(imm), \
+        ((char)(imm)&0xF0) ?  2 : ((char)(imm)>0x2 ? 18 : 34) - (char)(imm), \
+        ((char)(imm)&0xF0) ?  3 : ((char)(imm)>0x3 ? 19 : 35) - (char)(imm), \
+        ((char)(imm)&0xF0) ?  4 : ((char)(imm)>0x4 ? 20 : 36) - (char)(imm), \
+        ((char)(imm)&0xF0) ?  5 : ((char)(imm)>0x5 ? 21 : 37) - (char)(imm), \
+        ((char)(imm)&0xF0) ?  6 : ((char)(imm)>0x6 ? 22 : 38) - (char)(imm), \
+        ((char)(imm)&0xF0) ?  7 : ((char)(imm)>0x7 ? 23 : 39) - (char)(imm), \
+        ((char)(imm)&0xF0) ?  8 : ((char)(imm)>0x8 ? 24 : 40) - (char)(imm), \
+        ((char)(imm)&0xF0) ?  9 : ((char)(imm)>0x9 ? 25 : 41) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 10 : ((char)(imm)>0xA ? 26 : 42) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 11 : ((char)(imm)>0xB ? 27 : 43) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 12 : ((char)(imm)>0xC ? 28 : 44) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 13 : ((char)(imm)>0xD ? 29 : 45) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 14 : ((char)(imm)>0xE ? 30 : 46) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 15 : ((char)(imm)>0xF ? 31 : 47) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 16 : ((char)(imm)>0x0 ? 32 : 48) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 17 : ((char)(imm)>0x1 ? 33 : 49) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 18 : ((char)(imm)>0x2 ? 34 : 50) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 19 : ((char)(imm)>0x3 ? 35 : 51) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 20 : ((char)(imm)>0x4 ? 36 : 52) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 21 : ((char)(imm)>0x5 ? 37 : 53) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 22 : ((char)(imm)>0x6 ? 38 : 54) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 23 : ((char)(imm)>0x7 ? 39 : 55) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 24 : ((char)(imm)>0x8 ? 40 : 56) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 25 : ((char)(imm)>0x9 ? 41 : 57) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 26 : ((char)(imm)>0xA ? 42 : 58) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 27 : ((char)(imm)>0xB ? 43 : 59) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 28 : ((char)(imm)>0xC ? 44 : 60) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 29 : ((char)(imm)>0xD ? 45 : 61) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 30 : ((char)(imm)>0xE ? 46 : 62) - (char)(imm), \
+        ((char)(imm)&0xF0) ? 31 : ((char)(imm)>0xF ? 47 : 63) - (char)(imm)); })
 
 #define _mm256_bslli_epi128(a, count) _mm256_slli_si256((a), (count))
 
@@ -573,13 +619,13 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_slli_epi64(__m256i __a, int __count)
 {
-  return __builtin_ia32_psllqi256(__a, __count);
+  return __builtin_ia32_psllqi256((__v4di)__a, __count);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_sll_epi64(__m256i __a, __m128i __count)
 {
-  return __builtin_ia32_psllq256(__a, __count);
+  return __builtin_ia32_psllq256((__v4di)__a, __count);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -606,8 +652,42 @@
   return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
 }
 
-#define _mm256_srli_si256(a, count) __extension__ ({ \
-  (__m256i)__builtin_ia32_psrldqi256((__m256i)(a), (count)*8); })
+#define _mm256_srli_si256(a, imm) __extension__ ({ \
+  (__m256i)__builtin_shufflevector(                                           \
+        (__v32qi)(__m256i)(a),                                               \
+        (__v32qi)_mm256_setzero_si256(),                                     \
+        ((char)(imm)&0xF0) ? 32 : (char)(imm) + ((char)(imm)>0xF ? 16 : 0),  \
+        ((char)(imm)&0xF0) ? 33 : (char)(imm) + ((char)(imm)>0xE ? 17 : 1),  \
+        ((char)(imm)&0xF0) ? 34 : (char)(imm) + ((char)(imm)>0xD ? 18 : 2),  \
+        ((char)(imm)&0xF0) ? 35 : (char)(imm) + ((char)(imm)>0xC ? 19 : 3),  \
+        ((char)(imm)&0xF0) ? 36 : (char)(imm) + ((char)(imm)>0xB ? 20 : 4),  \
+        ((char)(imm)&0xF0) ? 37 : (char)(imm) + ((char)(imm)>0xA ? 21 : 5),  \
+        ((char)(imm)&0xF0) ? 38 : (char)(imm) + ((char)(imm)>0x9 ? 22 : 6),  \
+        ((char)(imm)&0xF0) ? 39 : (char)(imm) + ((char)(imm)>0x8 ? 23 : 7),  \
+        ((char)(imm)&0xF0) ? 40 : (char)(imm) + ((char)(imm)>0x7 ? 24 : 8),  \
+        ((char)(imm)&0xF0) ? 41 : (char)(imm) + ((char)(imm)>0x6 ? 25 : 9),  \
+        ((char)(imm)&0xF0) ? 42 : (char)(imm) + ((char)(imm)>0x5 ? 26 : 10), \
+        ((char)(imm)&0xF0) ? 43 : (char)(imm) + ((char)(imm)>0x4 ? 27 : 11), \
+        ((char)(imm)&0xF0) ? 44 : (char)(imm) + ((char)(imm)>0x3 ? 28 : 12), \
+        ((char)(imm)&0xF0) ? 45 : (char)(imm) + ((char)(imm)>0x2 ? 29 : 13), \
+        ((char)(imm)&0xF0) ? 46 : (char)(imm) + ((char)(imm)>0x1 ? 30 : 14), \
+        ((char)(imm)&0xF0) ? 47 : (char)(imm) + ((char)(imm)>0x0 ? 31 : 15), \
+        ((char)(imm)&0xF0) ? 48 : (char)(imm) + ((char)(imm)>0xF ? 32 : 16), \
+        ((char)(imm)&0xF0) ? 49 : (char)(imm) + ((char)(imm)>0xE ? 33 : 17), \
+        ((char)(imm)&0xF0) ? 50 : (char)(imm) + ((char)(imm)>0xD ? 34 : 18), \
+        ((char)(imm)&0xF0) ? 51 : (char)(imm) + ((char)(imm)>0xC ? 35 : 19), \
+        ((char)(imm)&0xF0) ? 52 : (char)(imm) + ((char)(imm)>0xB ? 36 : 20), \
+        ((char)(imm)&0xF0) ? 53 : (char)(imm) + ((char)(imm)>0xA ? 37 : 21), \
+        ((char)(imm)&0xF0) ? 54 : (char)(imm) + ((char)(imm)>0x9 ? 38 : 22), \
+        ((char)(imm)&0xF0) ? 55 : (char)(imm) + ((char)(imm)>0x8 ? 39 : 23), \
+        ((char)(imm)&0xF0) ? 56 : (char)(imm) + ((char)(imm)>0x7 ? 40 : 24), \
+        ((char)(imm)&0xF0) ? 57 : (char)(imm) + ((char)(imm)>0x6 ? 41 : 25), \
+        ((char)(imm)&0xF0) ? 58 : (char)(imm) + ((char)(imm)>0x5 ? 42 : 26), \
+        ((char)(imm)&0xF0) ? 59 : (char)(imm) + ((char)(imm)>0x4 ? 43 : 27), \
+        ((char)(imm)&0xF0) ? 60 : (char)(imm) + ((char)(imm)>0x3 ? 44 : 28), \
+        ((char)(imm)&0xF0) ? 61 : (char)(imm) + ((char)(imm)>0x2 ? 45 : 29), \
+        ((char)(imm)&0xF0) ? 62 : (char)(imm) + ((char)(imm)>0x1 ? 46 : 30), \
+        ((char)(imm)&0xF0) ? 63 : (char)(imm) + ((char)(imm)>0x0 ? 47 : 31)); })
 
 #define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count))
 
@@ -638,37 +718,37 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_srli_epi64(__m256i __a, int __count)
 {
-  return __builtin_ia32_psrlqi256(__a, __count);
+  return __builtin_ia32_psrlqi256((__v4di)__a, __count);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_srl_epi64(__m256i __a, __m128i __count)
 {
-  return __builtin_ia32_psrlq256(__a, __count);
+  return __builtin_ia32_psrlq256((__v4di)__a, __count);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_sub_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)((__v32qi)__a - (__v32qi)__b);
+  return (__m256i)((__v32qu)__a - (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_sub_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)((__v16hi)__a - (__v16hi)__b);
+  return (__m256i)((__v16hu)__a - (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_sub_epi32(__m256i __a, __m256i __b)
 {
-  return (__m256i)((__v8si)__a - (__v8si)__b);
+  return (__m256i)((__v8su)__a - (__v8su)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_sub_epi64(__m256i __a, __m256i __b)
 {
-  return __a - __b;
+  return (__m256i)((__v4du)__a - (__v4du)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -716,7 +796,7 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_unpackhi_epi64(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_shufflevector(__a, __b, 1, 4+1, 3, 4+3);
+  return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -740,13 +820,13 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_unpacklo_epi64(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_shufflevector(__a, __b, 0, 4+0, 2, 4+2);
+  return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_xor_si256(__m256i __a, __m256i __b)
 {
-  return __a ^ __b;
+  return (__m256i)((__v4du)__a ^ (__v4du)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -764,7 +844,7 @@
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_broadcastsd_pd(__m128d __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 0);
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
@@ -782,7 +862,7 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_broadcastsi128_si256(__m128i __X)
 {
-  return (__m256i)__builtin_shufflevector(__X, __X, 0, 1, 0, 1);
+  return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
 }
 
 #define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
@@ -826,7 +906,7 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_broadcastq_epi64(__m128i __X)
 {
-  return (__m256i)__builtin_shufflevector(__X, __X, 0, 0, 0, 0);
+  return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -851,7 +931,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_broadcastq_epi64(__m128i __X)
 {
-  return (__m128i)__builtin_shufflevector(__X, __X, 0, 0);
+  return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -862,9 +942,11 @@
 
 #define _mm256_permute4x64_pd(V, M) __extension__ ({ \
   (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V), \
-                                   (__v4df)_mm256_setzero_pd(), \
-                                   (M) & 0x3, ((M) & 0xc) >> 2, \
-                                   ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
+                                   (__v4df)_mm256_undefined_pd(), \
+                                   ((M) >> 0) & 0x3, \
+                                   ((M) >> 2) & 0x3, \
+                                   ((M) >> 4) & 0x3, \
+                                   ((M) >> 6) & 0x3); })
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
@@ -874,16 +956,18 @@
 
 #define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
   (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V), \
-                                   (__v4di)_mm256_setzero_si256(), \
-                                   (M) & 0x3, ((M) & 0xc) >> 2, \
-                                   ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
+                                   (__v4di)_mm256_undefined_si256(), \
+                                   ((M) >> 0) & 0x3, \
+                                   ((M) >> 2) & 0x3, \
+                                   ((M) >> 4) & 0x3, \
+                                   ((M) >> 6) & 0x3); })
 
 #define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
   (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (M)); })
 
 #define _mm256_extracti128_si256(V, M) __extension__ ({ \
   (__m128i)__builtin_shufflevector((__v4di)(__m256i)(V), \
-                                   (__v4di)_mm256_setzero_si256(), \
+                                   (__v4di)_mm256_undefined_si256(), \
                                    (((M) & 1) ? 2 : 0), \
                                    (((M) & 1) ? 3 : 1) ); })
 
@@ -904,7 +988,7 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_maskload_epi64(long long const *__X, __m256i __M)
 {
-  return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, __M);
+  return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -928,7 +1012,7 @@
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
 {
-  __builtin_ia32_maskstoreq256((__v4di *)__X, __M, __Y);
+  __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
 }
 
 static __inline__ void __DEFAULT_FN_ATTRS
@@ -940,7 +1024,7 @@
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
 {
-  __builtin_ia32_maskstoreq(( __v2di *)__X, __M, __Y);
+  __builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -958,13 +1042,13 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_sllv_epi64(__m256i __X, __m256i __Y)
 {
-  return (__m256i)__builtin_ia32_psllv4di(__X, __Y);
+  return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sllv_epi64(__m128i __X, __m128i __Y)
 {
-  return (__m128i)__builtin_ia32_psllv2di(__X, __Y);
+  return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -994,13 +1078,13 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_srlv_epi64(__m256i __X, __m256i __Y)
 {
-  return (__m256i)__builtin_ia32_psrlv4di(__X, __Y);
+  return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_srlv_epi64(__m128i __X, __m128i __Y)
 {
-  return (__m128i)__builtin_ia32_psrlv2di(__X, __Y);
+  return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
 }
 
 #define _mm_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
diff --git a/renderscript/clang-include/avx512bwintrin.h b/renderscript/clang-include/avx512bwintrin.h
index f289ed7..d3c5a6c 100644
--- a/renderscript/clang-include/avx512bwintrin.h
+++ b/renderscript/clang-include/avx512bwintrin.h
@@ -30,30 +30,28 @@
 
 typedef unsigned int __mmask32;
 typedef unsigned long long __mmask64;
-typedef char __v64qi __attribute__ ((__vector_size__ (64)));
-typedef short __v32hi __attribute__ ((__vector_size__ (64)));
 
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
 
-static  __inline __v64qi __DEFAULT_FN_ATTRS
+static  __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_setzero_qi(void) {
-  return (__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0,
-                       0, 0, 0, 0, 0, 0, 0, 0,
-                       0, 0, 0, 0, 0, 0, 0, 0,
-                       0, 0, 0, 0, 0, 0, 0, 0,
-                       0, 0, 0, 0, 0, 0, 0, 0,
-                       0, 0, 0, 0, 0, 0, 0, 0,
-                       0, 0, 0, 0, 0, 0, 0, 0,
-                       0, 0, 0, 0, 0, 0, 0, 0 };
+  return (__m512i)(__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0,
+                             0, 0, 0, 0, 0, 0, 0, 0,
+                             0, 0, 0, 0, 0, 0, 0, 0,
+                             0, 0, 0, 0, 0, 0, 0, 0,
+                             0, 0, 0, 0, 0, 0, 0, 0,
+                             0, 0, 0, 0, 0, 0, 0, 0,
+                             0, 0, 0, 0, 0, 0, 0, 0,
+                             0, 0, 0, 0, 0, 0, 0, 0 };
 }
 
-static  __inline __v32hi __DEFAULT_FN_ATTRS
+static  __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_setzero_hi(void) {
-  return (__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0,
-                       0, 0, 0, 0, 0, 0, 0, 0,
-                       0, 0, 0, 0, 0, 0, 0, 0,
-                       0, 0, 0, 0, 0, 0, 0, 0 };
+  return (__m512i)(__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0,
+                             0, 0, 0, 0, 0, 0, 0, 0,
+                             0, 0, 0, 0, 0, 0, 0, 0,
+                             0, 0, 0, 0, 0, 0, 0, 0 };
 }
 
 /* Integer compare */
@@ -348,7 +346,7 @@
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_add_epi8 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v64qi) __A + (__v64qi) __B);
+  return (__m512i) ((__v64qu) __A + (__v64qu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -369,7 +367,7 @@
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_sub_epi8 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v64qi) __A - (__v64qi) __B);
+  return (__m512i) ((__v64qu) __A - (__v64qu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -390,7 +388,7 @@
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_add_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hi) __A + (__v32hi) __B);
+  return (__m512i) ((__v32hu) __A + (__v32hu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -411,7 +409,7 @@
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_sub_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hi) __A - (__v32hi) __B);
+  return (__m512i) ((__v32hu) __A - (__v32hu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -432,7 +430,7 @@
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mullo_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hi) __A * (__v32hi) __B);
+  return (__m512i) ((__v32hu) __A * (__v32hu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -454,17 +452,17 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
 {
-  return (__m512i) __builtin_ia32_blendmb_512_mask ((__v64qi) __A,
+  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
               (__v64qi) __W,
-              (__mmask64) __U);
+              (__v64qi) __A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
 {
-  return (__m512i) __builtin_ia32_blendmw_512_mask ((__v32hi) __A,
+  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
               (__v32hi) __W,
-              (__mmask32) __U);
+              (__v32hi) __A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1396,145 +1394,1015 @@
               __M);
 }
 
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_unpackhi_epi8 (__m512i __A, __m512i __B) {
-  return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
-                 (__v64qi) __B,
-                 (__v64qi) _mm512_setzero_qi(),
-                 (__mmask64) -1);
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
+{
+  __builtin_ia32_pmovwb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
+{
+  __builtin_ia32_pmovswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
+{
+  __builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_unpackhi_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
-         __m512i __B) {
-  return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
-                 (__v64qi) __B,
-                 (__v64qi) __W,
-                 (__mmask64) __U);
+_mm512_unpackhi_epi8(__m512i __A, __m512i __B) {
+  return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
+                                          8,  64+8,   9, 64+9,
+                                          10, 64+10, 11, 64+11,
+                                          12, 64+12, 13, 64+13,
+                                          14, 64+14, 15, 64+15,
+                                          24, 64+24, 25, 64+25,
+                                          26, 64+26, 27, 64+27,
+                                          28, 64+28, 29, 64+29,
+                                          30, 64+30, 31, 64+31,
+                                          40, 64+40, 41, 64+41,
+                                          42, 64+42, 43, 64+43,
+                                          44, 64+44, 45, 64+45,
+                                          46, 64+46, 47, 64+47,
+                                          56, 64+56, 57, 64+57,
+                                          58, 64+58, 59, 64+59,
+                                          60, 64+60, 61, 64+61,
+                                          62, 64+62, 63, 64+63);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_unpackhi_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
-                 (__v64qi) __B,
-                 (__v64qi) _mm512_setzero_qi(),
-                 (__mmask64) __U);
+_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+                                        (__v64qi)_mm512_unpackhi_epi8(__A, __B),
+                                        (__v64qi)__W);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_unpackhi_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
-                 (__v32hi) __B,
-                 (__v32hi) _mm512_setzero_hi(),
-                 (__mmask32) -1);
+_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
+  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+                                        (__v64qi)_mm512_unpackhi_epi8(__A, __B),
+                                        (__v64qi)_mm512_setzero_qi());
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_unpackhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
-          __m512i __B) {
-  return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
-                 (__v32hi) __B,
-                 (__v32hi) __W,
-                 (__mmask32) __U);
+_mm512_unpackhi_epi16(__m512i __A, __m512i __B) {
+  return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
+                                          4,  32+4,   5, 32+5,
+                                          6,  32+6,   7, 32+7,
+                                          12, 32+12, 13, 32+13,
+                                          14, 32+14, 15, 32+15,
+                                          20, 32+20, 21, 32+21,
+                                          22, 32+22, 23, 32+23,
+                                          28, 32+28, 29, 32+29,
+                                          30, 32+30, 31, 32+31);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_unpackhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
-                 (__v32hi) __B,
-                 (__v32hi) _mm512_setzero_hi(),
-                 (__mmask32) __U);
+_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+                                       (__v32hi)_mm512_unpackhi_epi16(__A, __B),
+                                       (__v32hi)__W);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_unpacklo_epi8 (__m512i __A, __m512i __B) {
-  return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
-                 (__v64qi) __B,
-                 (__v64qi) _mm512_setzero_qi(),
-                 (__mmask64) -1);
+_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
+  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+                                       (__v32hi)_mm512_unpackhi_epi16(__A, __B),
+                                       (__v32hi)_mm512_setzero_hi());
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_unpacklo_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
-         __m512i __B) {
-  return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
-                 (__v64qi) __B,
-                 (__v64qi) __W,
-                 (__mmask64) __U);
+_mm512_unpacklo_epi8(__m512i __A, __m512i __B) {
+  return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
+                                          0,  64+0,   1, 64+1,
+                                          2,  64+2,   3, 64+3,
+                                          4,  64+4,   5, 64+5,
+                                          6,  64+6,   7, 64+7,
+                                          16, 64+16, 17, 64+17,
+                                          18, 64+18, 19, 64+19,
+                                          20, 64+20, 21, 64+21,
+                                          22, 64+22, 23, 64+23,
+                                          32, 64+32, 33, 64+33,
+                                          34, 64+34, 35, 64+35,
+                                          36, 64+36, 37, 64+37,
+                                          38, 64+38, 39, 64+39,
+                                          48, 64+48, 49, 64+49,
+                                          50, 64+50, 51, 64+51,
+                                          52, 64+52, 53, 64+53,
+                                          54, 64+54, 55, 64+55);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_unpacklo_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
-                 (__v64qi) __B,
-                 (__v64qi) _mm512_setzero_qi(),
-                 (__mmask64) __U);
+_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+                                        (__v64qi)_mm512_unpacklo_epi8(__A, __B),
+                                        (__v64qi)__W);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_unpacklo_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
-                 (__v32hi) __B,
-                 (__v32hi) _mm512_setzero_hi(),
-                 (__mmask32) -1);
+_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
+  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+                                        (__v64qi)_mm512_unpacklo_epi8(__A, __B),
+                                        (__v64qi)_mm512_setzero_qi());
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_unpacklo_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
-          __m512i __B) {
-  return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
-                 (__v32hi) __B,
-                 (__v32hi) __W,
-                 (__mmask32) __U);
+_mm512_unpacklo_epi16(__m512i __A, __m512i __B) {
+  return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
+                                          0,  32+0,   1, 32+1,
+                                          2,  32+2,   3, 32+3,
+                                          8,  32+8,   9, 32+9,
+                                          10, 32+10, 11, 32+11,
+                                          16, 32+16, 17, 32+17,
+                                          18, 32+18, 19, 32+19,
+                                          24, 32+24, 25, 32+25,
+                                          26, 32+26, 27, 32+27);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_unpacklo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
-                 (__v32hi) __B,
-                 (__v32hi) _mm512_setzero_hi(),
-                 (__mmask32) __U);
+_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+                                       (__v32hi)_mm512_unpacklo_epi16(__A, __B),
+                                       (__v32hi)__W);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
+  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+                                       (__v32hi)_mm512_unpacklo_epi16(__A, __B),
+                                       (__v32hi)_mm512_setzero_hi());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi8_epi16 (__m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
+                (__v32hi)
+                _mm512_setzero_hi (),
+                (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi8_epi16 (__m512i __W, __mmask32 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
+                (__v32hi) __W,
+                (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi8_epi16 (__mmask32 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
+                (__v32hi)
+                _mm512_setzero_hi(),
+                (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu8_epi16 (__m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
+                (__v32hi)
+                _mm512_setzero_hi (),
+                (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu8_epi16 (__m512i __W, __mmask32 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
+                (__v32hi) __W,
+                (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu8_epi16 (__mmask32 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
+                (__v32hi)
+                _mm512_setzero_hi(),
+                (__mmask32) __U);
+}
+
+
 #define _mm512_cmp_epi8_mask(a, b, p) __extension__ ({ \
-  (__mmask16)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
-                                         (__v64qi)(__m512i)(b), \
-                                         (p), (__mmask64)-1); })
+  (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+                                         (__v64qi)(__m512i)(b), (int)(p), \
+                                         (__mmask64)-1); })
 
 #define _mm512_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
-  (__mmask16)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
-                                         (__v64qi)(__m512i)(b), \
-                                         (p), (__mmask64)(m)); })
+  (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+                                         (__v64qi)(__m512i)(b), (int)(p), \
+                                         (__mmask64)(m)); })
 
 #define _mm512_cmp_epu8_mask(a, b, p) __extension__ ({ \
-  (__mmask16)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
-                                          (__v64qi)(__m512i)(b), \
-                                          (p), (__mmask64)-1); })
+  (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+                                          (__v64qi)(__m512i)(b), (int)(p), \
+                                          (__mmask64)-1); })
 
 #define _mm512_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
-  (__mmask16)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
-                                          (__v64qi)(__m512i)(b), \
-                                          (p), (__mmask64)(m)); })
+  (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+                                          (__v64qi)(__m512i)(b), (int)(p), \
+                                          (__mmask64)(m)); })
 
 #define _mm512_cmp_epi16_mask(a, b, p) __extension__ ({ \
-  (__mmask16)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
-                                         (__v32hi)(__m512i)(b), \
-                                         (p), (__mmask32)-1); })
+  (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+                                         (__v32hi)(__m512i)(b), (int)(p), \
+                                         (__mmask32)-1); })
 
 #define _mm512_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
-  (__mmask16)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
-                                         (__v32hi)(__m512i)(b), \
-                                         (p), (__mmask32)(m)); })
+  (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+                                         (__v32hi)(__m512i)(b), (int)(p), \
+                                         (__mmask32)(m)); })
 
 #define _mm512_cmp_epu16_mask(a, b, p) __extension__ ({ \
-  (__mmask16)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
-                                          (__v32hi)(__m512i)(b), \
-                                          (p), (__mmask32)-1); })
+  (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+                                          (__v32hi)(__m512i)(b), (int)(p), \
+                                          (__mmask32)-1); })
 
 #define _mm512_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
-  (__mmask16)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
-                                          (__v32hi)(__m512i)(b), \
-                                          (p), (__mmask32)(m)); })
+  (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+                                          (__v32hi)(__m512i)(b), (int)(p), \
+                                          (__mmask32)(m)); })
+
+#define _mm512_shufflehi_epi16(A, imm) __extension__ ({ \
+  (__m512i)__builtin_shufflevector((__v32hi)(__m512i)(A), \
+                                   (__v32hi)_mm512_undefined_epi32(), \
+                                   0, 1, 2, 3, \
+                                   4  + (((imm) >> 0) & 0x3), \
+                                   4  + (((imm) >> 2) & 0x3), \
+                                   4  + (((imm) >> 4) & 0x3), \
+                                   4  + (((imm) >> 6) & 0x3), \
+                                   8, 9, 10, 11, \
+                                   12 + (((imm) >> 0) & 0x3), \
+                                   12 + (((imm) >> 2) & 0x3), \
+                                   12 + (((imm) >> 4) & 0x3), \
+                                   12 + (((imm) >> 6) & 0x3), \
+                                   16, 17, 18, 19, \
+                                   20 + (((imm) >> 0) & 0x3), \
+                                   20 + (((imm) >> 2) & 0x3), \
+                                   20 + (((imm) >> 4) & 0x3), \
+                                   20 + (((imm) >> 6) & 0x3), \
+                                   24, 25, 26, 27, \
+                                   28 + (((imm) >> 0) & 0x3), \
+                                   28 + (((imm) >> 2) & 0x3), \
+                                   28 + (((imm) >> 4) & 0x3), \
+                                   28 + (((imm) >> 6) & 0x3)); })
+
+#define _mm512_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                      (__v32hi)_mm512_shufflehi_epi16((A), \
+                                                                      (imm)), \
+                                      (__v32hi)(__m512i)(W)); })
+
+#define _mm512_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                      (__v32hi)_mm512_shufflehi_epi16((A), \
+                                                                      (imm)), \
+                                      (__v32hi)_mm512_setzero_hi()); })
+
+#define _mm512_shufflelo_epi16(A, imm) __extension__ ({ \
+  (__m512i)__builtin_shufflevector((__v32hi)(__m512i)(A), \
+                                   (__v32hi)_mm512_undefined_epi32(), \
+                                   0 + (((imm) >> 0) & 0x3), \
+                                   0 + (((imm) >> 2) & 0x3), \
+                                   0 + (((imm) >> 4) & 0x3), \
+                                   0 + (((imm) >> 6) & 0x3), \
+                                   4, 5, 6, 7, \
+                                   8 + (((imm) >> 0) & 0x3), \
+                                   8 + (((imm) >> 2) & 0x3), \
+                                   8 + (((imm) >> 4) & 0x3), \
+                                   8 + (((imm) >> 6) & 0x3), \
+                                   12, 13, 14, 15, \
+                                   16 + (((imm) >> 0) & 0x3), \
+                                   16 + (((imm) >> 2) & 0x3), \
+                                   16 + (((imm) >> 4) & 0x3), \
+                                   16 + (((imm) >> 6) & 0x3), \
+                                   20, 21, 22, 23, \
+                                   24 + (((imm) >> 0) & 0x3), \
+                                   24 + (((imm) >> 2) & 0x3), \
+                                   24 + (((imm) >> 4) & 0x3), \
+                                   24 + (((imm) >> 6) & 0x3), \
+                                   28, 29, 30, 31); })
+
+
+#define _mm512_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                      (__v32hi)_mm512_shufflelo_epi16((A), \
+                                                                      (imm)), \
+                                      (__v32hi)(__m512i)(W)); })
+
+
+#define _mm512_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                      (__v32hi)_mm512_shufflelo_epi16((A), \
+                                                                      (imm)), \
+                                      (__v32hi)_mm512_setzero_hi()); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sllv_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
+              (__v32hi) __B,
+              (__v32hi)
+              _mm512_setzero_hi (),
+              (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
+              (__v32hi) __B,
+              (__v32hi) __W,
+              (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sllv_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
+              (__v32hi) __B,
+              (__v32hi)
+              _mm512_setzero_hi (),
+              (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sll_epi16 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
+             (__v8hi) __B,
+             (__v32hi)
+             _mm512_setzero_hi (),
+             (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sll_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+           __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
+             (__v8hi) __B,
+             (__v32hi) __W,
+             (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sll_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
+             (__v8hi) __B,
+             (__v32hi)
+             _mm512_setzero_hi (),
+             (__mmask32) __U);
+}
+
+#define _mm512_slli_epi16(A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psllwi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+                                         (__v32hi)_mm512_setzero_hi(), \
+                                         (__mmask32)-1); })
+
+#define _mm512_mask_slli_epi16(W, U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psllwi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+                                         (__v32hi)(__m512i)(W), \
+                                         (__mmask32)(U)); })
+
+#define _mm512_maskz_slli_epi16(U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psllwi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+                                         (__v32hi)_mm512_setzero_hi(), \
+                                         (__mmask32)(U)); })
+
+#define _mm512_bslli_epi128(a, imm) __extension__ ({ \
+  (__m512i)__builtin_shufflevector(                                          \
+       (__v64qi)_mm512_setzero_si512(),                                      \
+       (__v64qi)(__m512i)(a),                                                \
+       ((char)(imm)&0xF0) ?  0 : ((char)(imm)>0x0 ? 16 :  64) - (char)(imm), \
+       ((char)(imm)&0xF0) ?  1 : ((char)(imm)>0x1 ? 17 :  65) - (char)(imm), \
+       ((char)(imm)&0xF0) ?  2 : ((char)(imm)>0x2 ? 18 :  66) - (char)(imm), \
+       ((char)(imm)&0xF0) ?  3 : ((char)(imm)>0x3 ? 19 :  67) - (char)(imm), \
+       ((char)(imm)&0xF0) ?  4 : ((char)(imm)>0x4 ? 20 :  68) - (char)(imm), \
+       ((char)(imm)&0xF0) ?  5 : ((char)(imm)>0x5 ? 21 :  69) - (char)(imm), \
+       ((char)(imm)&0xF0) ?  6 : ((char)(imm)>0x6 ? 22 :  70) - (char)(imm), \
+       ((char)(imm)&0xF0) ?  7 : ((char)(imm)>0x7 ? 23 :  71) - (char)(imm), \
+       ((char)(imm)&0xF0) ?  8 : ((char)(imm)>0x8 ? 24 :  72) - (char)(imm), \
+       ((char)(imm)&0xF0) ?  9 : ((char)(imm)>0x9 ? 25 :  73) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 10 : ((char)(imm)>0xA ? 26 :  74) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 11 : ((char)(imm)>0xB ? 27 :  75) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 12 : ((char)(imm)>0xC ? 28 :  76) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 13 : ((char)(imm)>0xD ? 29 :  77) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 14 : ((char)(imm)>0xE ? 30 :  78) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 15 : ((char)(imm)>0xF ? 31 :  79) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 16 : ((char)(imm)>0x0 ? 32 :  80) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 17 : ((char)(imm)>0x1 ? 33 :  81) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 18 : ((char)(imm)>0x2 ? 34 :  82) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 19 : ((char)(imm)>0x3 ? 35 :  83) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 20 : ((char)(imm)>0x4 ? 36 :  84) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 21 : ((char)(imm)>0x5 ? 37 :  85) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 22 : ((char)(imm)>0x6 ? 38 :  86) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 23 : ((char)(imm)>0x7 ? 39 :  87) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 24 : ((char)(imm)>0x8 ? 40 :  88) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 25 : ((char)(imm)>0x9 ? 41 :  89) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 26 : ((char)(imm)>0xA ? 42 :  90) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 27 : ((char)(imm)>0xB ? 43 :  91) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 28 : ((char)(imm)>0xC ? 44 :  92) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 29 : ((char)(imm)>0xD ? 45 :  93) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 30 : ((char)(imm)>0xE ? 46 :  94) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 31 : ((char)(imm)>0xF ? 47 :  95) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 32 : ((char)(imm)>0x0 ? 48 :  96) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 33 : ((char)(imm)>0x1 ? 49 :  97) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 34 : ((char)(imm)>0x2 ? 50 :  98) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 35 : ((char)(imm)>0x3 ? 51 :  99) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 36 : ((char)(imm)>0x4 ? 52 : 100) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 37 : ((char)(imm)>0x5 ? 53 : 101) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 38 : ((char)(imm)>0x6 ? 54 : 102) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 39 : ((char)(imm)>0x7 ? 55 : 103) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 40 : ((char)(imm)>0x8 ? 56 : 104) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 41 : ((char)(imm)>0x9 ? 57 : 105) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 42 : ((char)(imm)>0xA ? 58 : 106) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 43 : ((char)(imm)>0xB ? 59 : 107) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 44 : ((char)(imm)>0xC ? 60 : 108) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 45 : ((char)(imm)>0xD ? 61 : 109) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 46 : ((char)(imm)>0xE ? 62 : 110) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 47 : ((char)(imm)>0xF ? 63 : 111) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 48 : ((char)(imm)>0x0 ? 64 : 112) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 49 : ((char)(imm)>0x1 ? 65 : 113) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 50 : ((char)(imm)>0x2 ? 66 : 114) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 51 : ((char)(imm)>0x3 ? 67 : 115) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 52 : ((char)(imm)>0x4 ? 68 : 116) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 53 : ((char)(imm)>0x5 ? 69 : 117) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 54 : ((char)(imm)>0x6 ? 70 : 118) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 55 : ((char)(imm)>0x7 ? 71 : 119) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 56 : ((char)(imm)>0x8 ? 72 : 120) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 57 : ((char)(imm)>0x9 ? 73 : 121) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 58 : ((char)(imm)>0xA ? 74 : 122) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 59 : ((char)(imm)>0xB ? 75 : 123) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 60 : ((char)(imm)>0xC ? 76 : 124) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 61 : ((char)(imm)>0xD ? 77 : 125) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 62 : ((char)(imm)>0xE ? 78 : 126) - (char)(imm), \
+       ((char)(imm)&0xF0) ? 63 : ((char)(imm)>0xF ? 79 : 127) - (char)(imm)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srlv_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
+              (__v32hi) __B,
+              (__v32hi)
+              _mm512_setzero_hi (),
+              (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srlv_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
+              (__v32hi) __B,
+              (__v32hi) __W,
+              (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srlv_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
+              (__v32hi) __B,
+              (__v32hi)
+              _mm512_setzero_hi (),
+              (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srav_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
+              (__v32hi) __B,
+              (__v32hi)
+              _mm512_setzero_hi (),
+              (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srav_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
+              (__v32hi) __B,
+              (__v32hi) __W,
+              (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srav_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
+              (__v32hi) __B,
+              (__v32hi)
+              _mm512_setzero_hi (),
+              (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sra_epi16 (__m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
+             (__v8hi) __B,
+             (__v32hi)
+             _mm512_setzero_hi (),
+             (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sra_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+           __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
+             (__v8hi) __B,
+             (__v32hi) __W,
+            (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sra_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
+             (__v8hi) __B,
+             (__v32hi)
+             _mm512_setzero_hi (),
+            (__mmask32) __U);
+}
+
+#define _mm512_srai_epi16(A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+                                         (__v32hi)_mm512_setzero_hi(), \
+                                         (__mmask32)-1); })
+
+#define _mm512_mask_srai_epi16(W, U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+                                         (__v32hi)(__m512i)(W), \
+                                         (__mmask32)(U)); })
+
+#define _mm512_maskz_srai_epi16(U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+                                         (__v32hi)_mm512_setzero_hi(), \
+                                         (__mmask32)(U)); })
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srl_epi16 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
+             (__v8hi) __B,
+             (__v32hi)
+             _mm512_setzero_hi (),
+             (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srl_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+           __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
+             (__v8hi) __B,
+             (__v32hi) __W,
+             (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srl_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
+             (__v8hi) __B,
+             (__v32hi)
+             _mm512_setzero_hi (),
+             (__mmask32) __U);
+}
+
+#define _mm512_srli_epi16(A, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(imm), \
+                                         (__v32hi)_mm512_setzero_hi(), \
+                                         (__mmask32)-1); })
+
+#define _mm512_mask_srli_epi16(W, U, A, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(imm), \
+                                         (__v32hi)(__m512i)(W), \
+                                         (__mmask32)(U)); })
+
+#define _mm512_maskz_srli_epi16(U, A, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(imm), \
+                                         (__v32hi)_mm512_setzero_hi(), \
+                                         (__mmask32)(U)); })
+
+#define _mm512_bsrli_epi128(a, imm) __extension__ ({ \
+  (__m512i)__builtin_shufflevector(                     \
+      (__v64qi)(__m512i)(a),                      \
+      (__v64qi)_mm512_setzero_si512(),            \
+      ((char)(imm)&0xF0) ?  64 : (char)(imm) + ((char)(imm)>0xF ?  48 : 0),  \
+      ((char)(imm)&0xF0) ?  65 : (char)(imm) + ((char)(imm)>0xE ?  49 : 1),  \
+      ((char)(imm)&0xF0) ?  66 : (char)(imm) + ((char)(imm)>0xD ?  50 : 2),  \
+      ((char)(imm)&0xF0) ?  67 : (char)(imm) + ((char)(imm)>0xC ?  51 : 3),  \
+      ((char)(imm)&0xF0) ?  68 : (char)(imm) + ((char)(imm)>0xB ?  52 : 4),  \
+      ((char)(imm)&0xF0) ?  69 : (char)(imm) + ((char)(imm)>0xA ?  53 : 5),  \
+      ((char)(imm)&0xF0) ?  70 : (char)(imm) + ((char)(imm)>0x9 ?  54 : 6),  \
+      ((char)(imm)&0xF0) ?  71 : (char)(imm) + ((char)(imm)>0x8 ?  55 : 7),  \
+      ((char)(imm)&0xF0) ?  72 : (char)(imm) + ((char)(imm)>0x7 ?  56 : 8),  \
+      ((char)(imm)&0xF0) ?  73 : (char)(imm) + ((char)(imm)>0x6 ?  57 : 9),  \
+      ((char)(imm)&0xF0) ?  74 : (char)(imm) + ((char)(imm)>0x5 ?  58 : 10), \
+      ((char)(imm)&0xF0) ?  75 : (char)(imm) + ((char)(imm)>0x4 ?  59 : 11), \
+      ((char)(imm)&0xF0) ?  76 : (char)(imm) + ((char)(imm)>0x3 ?  60 : 12), \
+      ((char)(imm)&0xF0) ?  77 : (char)(imm) + ((char)(imm)>0x2 ?  61 : 13), \
+      ((char)(imm)&0xF0) ?  78 : (char)(imm) + ((char)(imm)>0x1 ?  62 : 14), \
+      ((char)(imm)&0xF0) ?  79 : (char)(imm) + ((char)(imm)>0x0 ?  63 : 15), \
+      ((char)(imm)&0xF0) ?  80 : (char)(imm) + ((char)(imm)>0xF ?  64 : 16), \
+      ((char)(imm)&0xF0) ?  81 : (char)(imm) + ((char)(imm)>0xE ?  65 : 17), \
+      ((char)(imm)&0xF0) ?  82 : (char)(imm) + ((char)(imm)>0xD ?  66 : 18), \
+      ((char)(imm)&0xF0) ?  83 : (char)(imm) + ((char)(imm)>0xC ?  67 : 19), \
+      ((char)(imm)&0xF0) ?  84 : (char)(imm) + ((char)(imm)>0xB ?  68 : 20), \
+      ((char)(imm)&0xF0) ?  85 : (char)(imm) + ((char)(imm)>0xA ?  69 : 21), \
+      ((char)(imm)&0xF0) ?  86 : (char)(imm) + ((char)(imm)>0x9 ?  70 : 22), \
+      ((char)(imm)&0xF0) ?  87 : (char)(imm) + ((char)(imm)>0x8 ?  71 : 23), \
+      ((char)(imm)&0xF0) ?  88 : (char)(imm) + ((char)(imm)>0x7 ?  72 : 24), \
+      ((char)(imm)&0xF0) ?  89 : (char)(imm) + ((char)(imm)>0x6 ?  73 : 25), \
+      ((char)(imm)&0xF0) ?  90 : (char)(imm) + ((char)(imm)>0x5 ?  74 : 26), \
+      ((char)(imm)&0xF0) ?  91 : (char)(imm) + ((char)(imm)>0x4 ?  75 : 27), \
+      ((char)(imm)&0xF0) ?  92 : (char)(imm) + ((char)(imm)>0x3 ?  76 : 28), \
+      ((char)(imm)&0xF0) ?  93 : (char)(imm) + ((char)(imm)>0x2 ?  77 : 29), \
+      ((char)(imm)&0xF0) ?  94 : (char)(imm) + ((char)(imm)>0x1 ?  78 : 30), \
+      ((char)(imm)&0xF0) ?  95 : (char)(imm) + ((char)(imm)>0x0 ?  79 : 31), \
+      ((char)(imm)&0xF0) ?  96 : (char)(imm) + ((char)(imm)>0xF ?  80 : 32), \
+      ((char)(imm)&0xF0) ?  97 : (char)(imm) + ((char)(imm)>0xE ?  81 : 33), \
+      ((char)(imm)&0xF0) ?  98 : (char)(imm) + ((char)(imm)>0xD ?  82 : 34), \
+      ((char)(imm)&0xF0) ?  99 : (char)(imm) + ((char)(imm)>0xC ?  83 : 35), \
+      ((char)(imm)&0xF0) ? 100 : (char)(imm) + ((char)(imm)>0xB ?  84 : 36), \
+      ((char)(imm)&0xF0) ? 101 : (char)(imm) + ((char)(imm)>0xA ?  85 : 37), \
+      ((char)(imm)&0xF0) ? 102 : (char)(imm) + ((char)(imm)>0x9 ?  86 : 38), \
+      ((char)(imm)&0xF0) ? 103 : (char)(imm) + ((char)(imm)>0x8 ?  87 : 39), \
+      ((char)(imm)&0xF0) ? 104 : (char)(imm) + ((char)(imm)>0x7 ?  88 : 40), \
+      ((char)(imm)&0xF0) ? 105 : (char)(imm) + ((char)(imm)>0x6 ?  89 : 41), \
+      ((char)(imm)&0xF0) ? 106 : (char)(imm) + ((char)(imm)>0x5 ?  90 : 42), \
+      ((char)(imm)&0xF0) ? 107 : (char)(imm) + ((char)(imm)>0x4 ?  91 : 43), \
+      ((char)(imm)&0xF0) ? 108 : (char)(imm) + ((char)(imm)>0x3 ?  92 : 44), \
+      ((char)(imm)&0xF0) ? 109 : (char)(imm) + ((char)(imm)>0x2 ?  93 : 45), \
+      ((char)(imm)&0xF0) ? 110 : (char)(imm) + ((char)(imm)>0x1 ?  94 : 46), \
+      ((char)(imm)&0xF0) ? 111 : (char)(imm) + ((char)(imm)>0x0 ?  95 : 47), \
+      ((char)(imm)&0xF0) ? 112 : (char)(imm) + ((char)(imm)>0xF ?  96 : 48), \
+      ((char)(imm)&0xF0) ? 113 : (char)(imm) + ((char)(imm)>0xE ?  97 : 49), \
+      ((char)(imm)&0xF0) ? 114 : (char)(imm) + ((char)(imm)>0xD ?  98 : 50), \
+      ((char)(imm)&0xF0) ? 115 : (char)(imm) + ((char)(imm)>0xC ?  99 : 51), \
+      ((char)(imm)&0xF0) ? 116 : (char)(imm) + ((char)(imm)>0xB ? 100 : 52), \
+      ((char)(imm)&0xF0) ? 117 : (char)(imm) + ((char)(imm)>0xA ? 101 : 53), \
+      ((char)(imm)&0xF0) ? 118 : (char)(imm) + ((char)(imm)>0x9 ? 102 : 54), \
+      ((char)(imm)&0xF0) ? 119 : (char)(imm) + ((char)(imm)>0x8 ? 103 : 55), \
+      ((char)(imm)&0xF0) ? 120 : (char)(imm) + ((char)(imm)>0x7 ? 104 : 56), \
+      ((char)(imm)&0xF0) ? 121 : (char)(imm) + ((char)(imm)>0x6 ? 105 : 57), \
+      ((char)(imm)&0xF0) ? 122 : (char)(imm) + ((char)(imm)>0x5 ? 106 : 58), \
+      ((char)(imm)&0xF0) ? 123 : (char)(imm) + ((char)(imm)>0x4 ? 107 : 59), \
+      ((char)(imm)&0xF0) ? 124 : (char)(imm) + ((char)(imm)>0x3 ? 108 : 60), \
+      ((char)(imm)&0xF0) ? 125 : (char)(imm) + ((char)(imm)>0x2 ? 109 : 61), \
+      ((char)(imm)&0xF0) ? 126 : (char)(imm) + ((char)(imm)>0x1 ? 110 : 62), \
+      ((char)(imm)&0xF0) ? 127 : (char)(imm) + ((char)(imm)>0x0 ? 111 : 63)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
+                (__v32hi) __A,
+                (__v32hi) __W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
+                (__v32hi) __A,
+                (__v32hi) _mm512_setzero_hi ());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
+                (__v64qi) __A,
+                (__v64qi) __W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
+                (__v64qi) __A,
+                (__v64qi) _mm512_setzero_hi ());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastb512_gpr_mask (__A,
+                 (__v64qi) __O,
+                 __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastb512_gpr_mask (__A,
+                 (__v64qi)
+                 _mm512_setzero_qi(),
+                 __M);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
+{
+  return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
+                (__mmask64) __B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
+{
+  return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
+                (__mmask32) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
+                 (__v32hi) __W,
+                 (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
+                 (__v32hi)
+                 _mm512_setzero_hi (),
+                 (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
+                 (__v64qi) __W,
+                 (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
+                 (__v64qi)
+                 _mm512_setzero_hi (),
+                 (__mmask64) __U);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
+{
+  __builtin_ia32_storedquhi512_mask ((__v32hi *) __P,
+             (__v32hi) __A,
+             (__mmask32) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A)
+{
+  __builtin_ia32_storedquqi512_mask ((__v64qi *) __P,
+             (__v64qi) __A,
+             (__mmask64) __U);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_test_epi8_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_ptestmb512 ((__v64qi) __A,
+            (__v64qi) __B,
+            (__mmask64) -1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_ptestmb512 ((__v64qi) __A,
+            (__v64qi) __B, __U);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_test_epi16_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestmw512 ((__v32hi) __A,
+            (__v32hi) __B,
+            (__mmask32) -1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestmw512 ((__v32hi) __A,
+            (__v32hi) __B, __U);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_testn_epi8_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_ptestnmb512 ((__v64qi) __A,
+             (__v64qi) __B,
+             (__mmask64) -1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_ptestnmb512 ((__v64qi) __A,
+             (__v64qi) __B, __U);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_testn_epi16_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestnmw512 ((__v32hi) __A,
+             (__v32hi) __B,
+             (__mmask32) -1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestnmw512 ((__v32hi) __A,
+             (__v32hi) __B, __U);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_movepi8_mask (__m512i __A)
+{
+  return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_movepi16_mask (__m512i __A)
+{
+  return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_movm_epi8 (__mmask64 __A)
+{
+  return (__m512i) __builtin_ia32_cvtmask2b512 (__A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_movm_epi16 (__mmask32 __A)
+{
+  return (__m512i) __builtin_ia32_cvtmask2w512 (__A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastb_epi8 (__m128i __A)
+{
+  return (__m512i)__builtin_shufflevector((__v16qi) __A,
+                                          (__v16qi)_mm_undefined_si128(),
+                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A)
+{
+  return (__m512i)__builtin_ia32_selectb_512(__M,
+                                             (__v64qi) _mm512_broadcastb_epi8(__A),
+                                             (__v64qi) __O);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A)
+{
+  return (__m512i)__builtin_ia32_selectb_512(__M,
+                                             (__v64qi) _mm512_broadcastb_epi8(__A),
+                                             (__v64qi) _mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastw512_gpr_mask (__A,
+                 (__v32hi) __O,
+                 __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_set1_epi16 (__mmask32 __M, short __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastw512_gpr_mask (__A,
+                 (__v32hi) _mm512_setzero_hi(),
+                 __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastw_epi16 (__m128i __A)
+{
+  return (__m512i)__builtin_shufflevector((__v8hi) __A,
+                                          (__v8hi)_mm_undefined_si128(),
+                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A)
+{
+  return (__m512i)__builtin_ia32_selectw_512(__M,
+                                             (__v32hi) _mm512_broadcastw_epi16(__A),
+                                             (__v32hi) __O);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A)
+{
+  return (__m512i)__builtin_ia32_selectw_512(__M,
+                                             (__v32hi) _mm512_broadcastw_epi16(__A),
+                                             (__v32hi) _mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutexvar_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
+                 (__v32hi) __A,
+                 (__v32hi) _mm512_undefined_epi32 (),
+                 (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A,
+        __m512i __B)
+{
+  return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
+                 (__v32hi) __A,
+                 (__v32hi) _mm512_setzero_hi(),
+                 (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+             __m512i __B)
+{
+  return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
+                 (__v32hi) __A,
+                 (__v32hi) __W,
+                 (__mmask32) __M);
+}
+
+#define _mm512_alignr_epi8(A, B, N) __extension__ ({\
+  (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \
+                                          (__v64qi)(__m512i)(B), (int)(N), \
+                                          (__v64qi)_mm512_undefined_pd(), \
+                                          (__mmask64)-1); })
+
+#define _mm512_mask_alignr_epi8(W, U, A, B, N) __extension__({\
+  (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \
+                                          (__v64qi)(__m512i)(B), (int)(N), \
+                                          (__v64qi)(__m512i)(W), \
+                                          (__mmask64)(U)); })
+
+#define _mm512_maskz_alignr_epi8(U, A, B, N) __extension__({\
+  (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \
+                                          (__v64qi)(__m512i)(B), (int)(N), \
+                                          (__v64qi)_mm512_setzero_si512(), \
+                                          (__mmask64)(U)); })
+
+#define _mm512_dbsad_epu8(A, B, imm) __extension__ ({\
+  (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \
+                                           (__v64qi)(__m512i)(B), (int)(imm), \
+                                           (__v32hi)_mm512_undefined_epi32(), \
+                                           (__mmask32)-1); })
+
+#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) ({\
+  (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \
+                                           (__v64qi)(__m512i)(B), (int)(imm), \
+                                           (__v32hi)(__m512i)(W), \
+                                           (__mmask32)(U)); })
+
+#define _mm512_maskz_dbsad_epu8(U, A, B, imm) ({\
+  (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \
+                                           (__v64qi)(__m512i)(B), (int)(imm), \
+                                           (__v32hi)_mm512_setzero_hi(), \
+                                           (__mmask32)(U)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sad_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A,
+               (__v64qi) __B);
+}
+
 
 
 #undef __DEFAULT_FN_ATTRS
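For context, a minimal usage sketch of the masked 16-bit variable shift added above. This is illustrative only and not part of the imported header; it assumes an AVX-512BW-capable toolchain and the standard <immintrin.h> entry point.

/* Minimal sketch: masked logical right shift of 16-bit lanes.
 * Build with e.g. `clang -mavx512bw shift_test.c`. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m512i v   = _mm512_set1_epi16(0x00F0);   /* every 16-bit lane holds 0x00F0 */
  __m512i cnt = _mm512_set1_epi16(4);        /* per-lane shift count           */
  __mmask32 k = 0x55555555;                  /* select the even lanes          */

  /* Selected lanes receive v >> cnt; unselected lanes keep the pass-through
   * operand (here v itself), matching the mask semantics of the header. */
  __m512i r = _mm512_mask_srlv_epi16(v, k, v, cnt);

  unsigned short out[32];
  _mm512_storeu_si512((__m512i *)out, r);
  printf("lane0=0x%04x lane1=0x%04x\n", out[0], out[1]); /* 0x000f 0x00f0 */
  return 0;
}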
diff --git a/renderscript/clang-include/avx512cdintrin.h b/renderscript/clang-include/avx512cdintrin.h
index 3894b29..23c4235 100644
--- a/renderscript/clang-include/avx512cdintrin.h
+++ b/renderscript/clang-include/avx512cdintrin.h
@@ -126,6 +126,19 @@
              (__v8di) _mm512_setzero_si512 (),
              (__mmask8) __U);
 }
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastmb_epi64 (__mmask8 __A)
+{
+  return (__m512i) __builtin_ia32_broadcastmb512 (__A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastmw_epi32 (__mmask16 __A)
+{
+  return (__m512i) __builtin_ia32_broadcastmw512 (__A);
+}
+
 #undef __DEFAULT_FN_ATTRS
 
 #endif
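A similarly hedged sketch for the two AVX-512CD mask-broadcast intrinsics added above; again illustrative only, assuming an AVX-512CD-capable toolchain rather than documenting the imported header itself.

/* Minimal sketch: broadcast a mask register's value into every element.
 * Build with e.g. `clang -mavx512cd broadcastm_test.c`. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __mmask8  mb = 0xA5;
  __mmask16 mw = 0x1234;

  __m512i q = _mm512_broadcastmb_epi64(mb);  /* each 64-bit lane becomes 0xA5   */
  __m512i d = _mm512_broadcastmw_epi32(mw);  /* each 32-bit lane becomes 0x1234 */

  long long ql[8];
  int dl[16];
  _mm512_storeu_si512((__m512i *)ql, q);
  _mm512_storeu_si512((__m512i *)dl, d);
  printf("q[0]=0x%llx d[0]=0x%x\n", ql[0], dl[0]); /* 0xa5, 0x1234 */
  return 0;
}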
diff --git a/renderscript/clang-include/avx512dqintrin.h b/renderscript/clang-include/avx512dqintrin.h
index afee490..13665e4 100644
--- a/renderscript/clang-include/avx512dqintrin.h
+++ b/renderscript/clang-include/avx512dqintrin.h
@@ -33,7 +33,7 @@
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mullo_epi64 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v8di) __A * (__v8di) __B);
+  return (__m512i) ((__v8du) __A * (__v8du) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -55,7 +55,7 @@
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_xor_pd (__m512d __A, __m512d __B) {
-  return (__m512d) ((__v8di) __A ^ (__v8di) __B);
+  return (__m512d) ((__v8du) __A ^ (__v8du) __B);
 }
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -77,7 +77,7 @@
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
 _mm512_xor_ps (__m512 __A, __m512 __B) {
-  return (__m512) ((__v16si) __A ^ (__v16si) __B);
+  return (__m512) ((__v16su) __A ^ (__v16su) __B);
 }
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
@@ -99,7 +99,7 @@
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_or_pd (__m512d __A, __m512d __B) {
-  return (__m512d) ((__v8di) __A | (__v8di) __B);
+  return (__m512d) ((__v8du) __A | (__v8du) __B);
 }
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -121,7 +121,7 @@
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
 _mm512_or_ps (__m512 __A, __m512 __B) {
-  return (__m512) ((__v16si) __A | (__v16si) __B);
+  return (__m512) ((__v16su) __A | (__v16su) __B);
 }
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
@@ -143,7 +143,7 @@
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_and_pd (__m512d __A, __m512d __B) {
-  return (__m512d) ((__v8di) __A & (__v8di) __B);
+  return (__m512d) ((__v8du) __A & (__v8du) __B);
 }
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -165,7 +165,7 @@
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
 _mm512_and_ps (__m512 __A, __m512 __B) {
-  return (__m512) ((__v16si) __A & (__v16si) __B);
+  return (__m512) ((__v16su) __A & (__v16su) __B);
 }
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
@@ -261,17 +261,20 @@
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvt_roundpd_epi64(__A, __R) __extension__ ({              \
-  (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,               \
-                (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+#define _mm512_cvt_roundpd_epi64(A, R) __extension__ ({              \
+  (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+                                           (__v8di)_mm512_setzero_si512(), \
+                                           (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvt_roundpd_epi64(__W, __U, __A, __R) __extension__ ({ \
-  (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,                 \
-                (__v8di) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvt_roundpd_epi64(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+                                           (__v8di)(__m512i)(W), \
+                                           (__mmask8)(U), (int)(R)); })
 
-#define _mm512_maskz_cvt_roundpd_epi64(__U, __A, __R) __extension__ ({   \
-  (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,        \
-                (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R); })
+#define _mm512_maskz_cvt_roundpd_epi64(U, A, R) __extension__ ({   \
+  (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+                                           (__v8di)_mm512_setzero_si512(), \
+                                           (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_cvtpd_epu64 (__m512d __A) {
@@ -297,17 +300,20 @@
                  _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvt_roundpd_epu64(__A, __R) __extension__ ({               \
-  (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,               \
-                 (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+#define _mm512_cvt_roundpd_epu64(A, R) __extension__ ({               \
+  (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvt_roundpd_epu64(__W, __U, __A, __R) __extension__ ({ \
-  (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,                \
-                 (__v8di) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvt_roundpd_epu64(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8di)(__m512i)(W), \
+                                            (__mmask8)(U), (int)(R)); })
 
-#define _mm512_maskz_cvt_roundpd_epu64(__U, __A, __R) __extension__ ({     \
-  (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,                \
-                 (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvt_roundpd_epu64(U, A, R) __extension__ ({     \
+  (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_cvtps_epi64 (__m256 __A) {
@@ -333,17 +339,20 @@
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvt_roundps_epi64(__A, __R) __extension__ ({             \
-  (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,              \
-                (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+#define _mm512_cvt_roundps_epi64(A, R) __extension__ ({             \
+  (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+                                           (__v8di)_mm512_setzero_si512(), \
+                                           (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvt_roundps_epi64(__W, __U, __A, __R) __extension__ ({ \
-  (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,                 \
-                (__v8di) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvt_roundps_epi64(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+                                           (__v8di)(__m512i)(W), \
+                                           (__mmask8)(U), (int)(R)); })
 
-#define _mm512_maskz_cvt_roundps_epi64(__U, __A, __R) __extension__ ({   \
-  (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,               \
-                (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvt_roundps_epi64(U, A, R) __extension__ ({   \
+  (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+                                           (__v8di)_mm512_setzero_si512(), \
+                                           (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_cvtps_epu64 (__m256 __A) {
@@ -369,17 +378,20 @@
                  _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvt_roundps_epu64(__A, __R) __extension__ ({              \
-  (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,              \
-                 (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+#define _mm512_cvt_roundps_epu64(A, R) __extension__ ({              \
+  (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvt_roundps_epu64(__W, __U, __A, __R) __extension__ ({ \
-  (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,                \
-                 (__v8di) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvt_roundps_epu64(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+                                            (__v8di)(__m512i)(W), \
+                                            (__mmask8)(U), (int)(R)); })
 
-#define _mm512_maskz_cvt_roundps_epu64(__U, __A, __R) __extension__ ({   \
-  (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,              \
-                 (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvt_roundps_epu64(U, A, R) __extension__ ({   \
+  (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -406,17 +418,20 @@
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvt_roundepi64_pd(__A, __R) __extension__ ({          \
-  (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,           \
-                (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+#define _mm512_cvt_roundepi64_pd(A, R) __extension__ ({          \
+  (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvt_roundepi64_pd(__W, __U, __A, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,                 \
-                (__v8df) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvt_roundepi64_pd(W, U, A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+                                           (__v8df)(__m512d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
 
-#define _mm512_maskz_cvt_roundepi64_pd(__U, __A, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,             \
-                (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvt_roundepi64_pd(U, A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm512_cvtepi64_ps (__m512i __A) {
@@ -442,17 +457,20 @@
                _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvt_roundepi64_ps(__A, __R) __extension__ ({        \
-  (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,          \
-               (__v8sf) _mm256_setzero_ps(), (__mmask8) -1, __R);})
+#define _mm512_cvt_roundepi64_ps(A, R) __extension__ ({        \
+  (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+                                          (__v8sf)_mm256_setzero_ps(), \
+                                          (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvt_roundepi64_ps(__W, __U, __A, __R) __extension__ ({ \
-  (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,                  \
-               (__v8sf) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvt_roundepi64_ps(W, U, A, R) __extension__ ({ \
+  (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+                                          (__v8sf)(__m256)(W), (__mmask8)(U), \
+                                          (int)(R)); })
 
-#define _mm512_maskz_cvt_roundepi64_ps(__U, __A, __R) __extension__ ({ \
-  (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,              \
-               (__v8sf) _mm256_setzero_ps(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvt_roundepi64_ps(U, A, R) __extension__ ({ \
+  (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+                                          (__v8sf)_mm256_setzero_ps(), \
+                                          (__mmask8)(U), (int)(R)); })
 
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -479,17 +497,20 @@
                  _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvtt_roundpd_epi64(__A, __R) __extension__ ({             \
-  (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,              \
-                 (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+#define _mm512_cvtt_roundpd_epi64(A, R) __extension__ ({             \
+  (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvtt_roundpd_epi64(__W, __U, __A, __R) __extension__ ({ \
-  (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,                 \
-                 (__v8di) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8di)(__m512i)(W), \
+                                            (__mmask8)(U), (int)(R)); })
 
-#define _mm512_maskz_cvtt_roundpd_epi64(__U, __A, __R) __extension__ ({ \
-  (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,             \
-                 (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvtt_roundpd_epi64(U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_cvttpd_epu64 (__m512d __A) {
@@ -515,17 +536,20 @@
                   _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvtt_roundpd_epu64(__A, __R) __extension__ ({              \
-  (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,              \
-                  (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+#define _mm512_cvtt_roundpd_epu64(A, R) __extension__ ({              \
+  (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvtt_roundpd_epu64(__W, __U, __A, __R) __extension__ ({ \
-  (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,                \
-                  (__v8di) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8di)(__m512i)(W), \
+                                             (__mmask8)(U), (int)(R)); })
 
-#define _mm512_maskz_cvtt_roundpd_epu64(__U, __A, __R) __extension__ ({   \
-  (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,              \
-                  (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvtt_roundpd_epu64(U, A, R) __extension__ ({   \
+  (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_cvttps_epi64 (__m256 __A) {
@@ -551,17 +575,20 @@
                  _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvtt_roundps_epi64(__A, __R) __extension__ ({            \
-  (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,             \
-                 (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+#define _mm512_cvtt_roundps_epi64(A, R) __extension__ ({            \
+  (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvtt_roundps_epi64(__W, __U, __A, __R) __extension__ ({ \
-  (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,                 \
-                 (__v8di) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvtt_roundps_epi64(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+                                            (__v8di)(__m512i)(W), \
+                                            (__mmask8)(U), (int)(R)); })
 
-#define _mm512_maskz_cvtt_roundps_epi64(__U, __A, __R) __extension__ ({  \
-  (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,              \
-                 (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvtt_roundps_epi64(U, A, R) __extension__ ({  \
+  (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_cvttps_epu64 (__m256 __A) {
@@ -587,17 +614,20 @@
                   _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvtt_roundps_epu64(__A, __R) __extension__ ({            \
-  (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,            \
-                  (__v8di) _mm512_setzero_si512(),(__mmask8) -1, __R);})
+#define _mm512_cvtt_roundps_epu64(A, R) __extension__ ({            \
+  (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvtt_roundps_epu64(__W, __U, __A, __R) __extension__ ({ \
-  (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,                \
-                  (__v8di) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvtt_roundps_epu64(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+                                             (__v8di)(__m512i)(W), \
+                                             (__mmask8)(U), (int)(R)); })
 
-#define _mm512_maskz_cvtt_roundps_epu64(__U, __A, __R) __extension__ ({  \
-  (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,             \
-                  (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvtt_roundps_epu64(U, A, R) __extension__ ({  \
+  (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_cvtepu64_pd (__m512i __A) {
@@ -623,18 +653,21 @@
                  _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvt_roundepu64_pd(__A, __R) __extension__ ({          \
-  (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,          \
-                 (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+#define _mm512_cvt_roundepu64_pd(A, R) __extension__ ({          \
+  (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvt_roundepu64_pd(__W, __U, __A, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,                \
-                 (__v8df) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvt_roundepu64_pd(W, U, A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+                                            (__v8df)(__m512d)(W), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
-#define _mm512_maskz_cvt_roundepu64_pd(__U, __A, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,            \
-                 (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvt_roundepu64_pd(U, A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
@@ -661,117 +694,637 @@
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvt_roundepu64_ps(__A, __R) __extension__ ({         \
-  (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,          \
-                (__v8sf) _mm256_setzero_ps(), (__mmask8) -1, __R);})
+#define _mm512_cvt_roundepu64_ps(A, R) __extension__ ({         \
+  (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_cvt_roundepu64_ps(__W, __U, __A, __R) __extension__ ({ \
-  (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,                 \
-                (__v8sf) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_cvt_roundepu64_ps(W, U, A, R) __extension__ ({ \
+  (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+                                           (__v8sf)(__m256)(W), (__mmask8)(U), \
+                                           (int)(R)); })
 
-#define _mm512_maskz_cvt_roundepu64_ps(__U, __A, __R) __extension__ ({ \
-  (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,             \
-                (__v8sf) _mm256_setzero_ps(), (__mmask8) __U, __R);})
+#define _mm512_maskz_cvt_roundepu64_ps(U, A, R) __extension__ ({ \
+  (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)(U), (int)(R)); })
 
-#define _mm512_range_pd(__A, __B, __C) __extension__ ({                     \
-  (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C,\
-               (__v8df) _mm512_setzero_pd(), (__mmask8) -1,                 \
-               _MM_FROUND_CUR_DIRECTION);})
+#define _mm512_range_pd(A, B, C) __extension__ ({                     \
+  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(C), \
+                                          (__v8df)_mm512_setzero_pd(), \
+                                          (__mmask8)-1, \
+                                          _MM_FROUND_CUR_DIRECTION); })
 
-#define _mm512_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({      \
-  (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C,\
-               (__v8df) __W, (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+#define _mm512_mask_range_pd(W, U, A, B, C) __extension__ ({      \
+  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(C), \
+                                          (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                          _MM_FROUND_CUR_DIRECTION); })
 
-#define _mm512_maskz_range_pd(__U, __A, __B, __C) __extension__ ({           \
-  (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
-               (__v8df) _mm512_setzero_pd(), (__mmask8) __U,                 \
-               _MM_FROUND_CUR_DIRECTION);})
+#define _mm512_maskz_range_pd(U, A, B, C) __extension__ ({           \
+  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(C), \
+                                          (__v8df)_mm512_setzero_pd(), \
+                                          (__mmask8)(U), \
+                                          _MM_FROUND_CUR_DIRECTION); })
 
-#define _mm512_range_round_pd(__A, __B, __C, __R) __extension__ ({           \
-  (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
-               (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+#define _mm512_range_round_pd(A, B, C, R) __extension__ ({           \
+  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(C), \
+                                          (__v8df)_mm512_setzero_pd(), \
+                                          (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_range_round_pd(__W, __U, __A, __B, __C, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C,      \
-               (__v8df) __W, (__mmask8) __U, __R);})
+#define _mm512_mask_range_round_pd(W, U, A, B, C, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(C), \
+                                          (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                          (int)(R)); })
 
-#define _mm512_maskz_range_round_pd(__U, __A, __B, __C, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C,   \
-               (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+#define _mm512_maskz_range_round_pd(U, A, B, C, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(C), \
+                                          (__v8df)_mm512_setzero_pd(), \
+                                          (__mmask8)(U), (int)(R)); })
 
-#define _mm512_range_ps(__A, __B, __C) __extension__ ({                       \
-  (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, __C, \
-               (__v16sf) _mm512_setzero_ps(), (__mmask16) -1,                 \
-               _MM_FROUND_CUR_DIRECTION);})
+#define _mm512_range_ps(A, B, C) __extension__ ({                       \
+  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)(__m512)(B), (int)(C), \
+                                         (__v16sf)_mm512_setzero_ps(), \
+                                         (__mmask16)-1, \
+                                         _MM_FROUND_CUR_DIRECTION); })
 
-#define _mm512_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({         \
-  (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B,       \
-               __C, (__v16sf) __W, (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+#define _mm512_mask_range_ps(W, U, A, B, C) __extension__ ({         \
+  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)(__m512)(B), (int)(C), \
+                                         (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                         _MM_FROUND_CUR_DIRECTION); })
 
-#define _mm512_maskz_range_ps(__U, __A, __B, __C) __extension__ ({      \
-  (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,(__v16sf) __B, \
-              __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) __U,      \
-              _MM_FROUND_CUR_DIRECTION);})
+#define _mm512_maskz_range_ps(U, A, B, C) __extension__ ({      \
+  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)(__m512)(B), (int)(C), \
+                                         (__v16sf)_mm512_setzero_ps(), \
+                                         (__mmask16)(U), \
+                                         _MM_FROUND_CUR_DIRECTION); })
 
-#define _mm512_range_round_ps(__A, __B, __C, __R) __extension__ ({         \
-  (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B,   \
-                __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R);})
+#define _mm512_range_round_ps(A, B, C, R) __extension__ ({         \
+  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)(__m512)(B), (int)(C), \
+                                         (__v16sf)_mm512_setzero_ps(), \
+                                         (__mmask16)-1, (int)(R)); })
 
-#define _mm512_mask_range_round_ps(__W, __U, __A, __B, __C, __R) __extension__ ({ \
-  (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B,          \
-                __C, (__v16sf) __W, (__mmask16) __U, __R);})
+#define _mm512_mask_range_round_ps(W, U, A, B, C, R) __extension__ ({ \
+  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)(__m512)(B), (int)(C), \
+                                         (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                         (int)(R)); })
 
-#define _mm512_maskz_range_round_ps(__U, __A, __B, __C, __R) __extension__ ({ \
-  (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B,      \
-                __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, __R);})
+#define _mm512_maskz_range_round_ps(U, A, B, C, R) __extension__ ({ \
+  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)(__m512)(B), (int)(C), \
+                                         (__v16sf)_mm512_setzero_ps(), \
+                                         (__mmask16)(U), (int)(R)); })
 
-#define _mm512_reduce_pd(__A, __B) __extension__ ({             \
-  (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
-                (__v8df) _mm512_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);})
+#define _mm_range_round_ss(A, B, C, R) __extension__ ({           \
+  (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8) -1, (int)(C),\
+                                               (int)(R)); })
 
-#define _mm512_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
-  (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B,    \
-                (__v8df) __W,(__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+#define _mm_range_ss(A ,B , C) _mm_range_round_ss(A, B, C ,_MM_FROUND_CUR_DIRECTION)
 
-#define _mm512_maskz_reduce_pd(__U, __A, __B) __extension__ ({  \
-  (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
-                (__v8df) _mm512_setzero_pd(), (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+#define _mm_mask_range_round_ss(W, U, A, B, C, R) __extension__ ({ \
+  (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)(__m128)(W),\
+                                               (__mmask8)(U), (int)(C),\
+                                               (int)(R)); })
 
-#define _mm512_reduce_ps(__A, __B) __extension__ ({              \
-  (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,  \
-               (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);})
+#define _mm_mask_range_ss(W , U, A, B, C) _mm_mask_range_round_ss(W, U, A, B, C , _MM_FROUND_CUR_DIRECTION)
 
-#define _mm512_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({   \
-  (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,      \
-               (__v16sf) __W, (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+#define _mm_maskz_range_round_ss(U, A, B, C, R) __extension__ ({ \
+  (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)(U), (int)(C),\
+                                               (int)(R)); })
 
-#define _mm512_maskz_reduce_ps(__U, __A, __B) __extension__ ({       \
-  (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,      \
-               (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+#define _mm_maskz_range_ss(U, A ,B , C) _mm_maskz_range_round_ss(U, A, B, C ,_MM_FROUND_CUR_DIRECTION)
 
-#define _mm512_reduce_round_pd(__A, __B, __R) __extension__ ({\
-  (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
-                (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+#define _mm_range_round_sd(A, B, C, R) __extension__ ({           \
+  (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8) -1, (int)(C),\
+                                                (int)(R)); })
 
-#define _mm512_mask_reduce_round_pd(__W, __U, __A, __B, __R) __extension__ ({\
-  (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B,    \
-                (__v8df) __W,(__mmask8) __U, __R);})
+#define _mm_range_sd(A, B, C) _mm_range_round_sd(A, B, C, _MM_FROUND_CUR_DIRECTION)
 
-#define _mm512_maskz_reduce_round_pd(__U, __A, __B, __R) __extension__ ({\
-  (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
-                (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+#define _mm_mask_range_round_sd(W, U, A, B, C, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)(__m128d)(W),\
+                                                (__mmask8)(U), (int)(C),\
+                                                (int)(R)); })
 
-#define _mm512_reduce_round_ps(__A, __B, __R) __extension__ ({\
-  (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,  \
-               (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R);})
+#define _mm_mask_range_sd(W, U, A, B, C) _mm_mask_range_round_sd(W, U, A, B, C, _MM_FROUND_CUR_DIRECTION)
 
-#define _mm512_mask_reduce_round_ps(__W, __U, __A, __B, __R) __extension__ ({\
-  (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,      \
-               (__v16sf) __W, (__mmask16) __U, __R);})
+#define _mm_maskz_range_round_sd(U, A, B, C, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)(U), (int)(C),\
+                                                (int)(R)); })
 
-#define _mm512_maskz_reduce_round_ps(__U, __A, __B, __R) __extension__ ({\
-  (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,      \
-               (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, __R);})
+#define _mm_maskz_range_sd(U, A, B, C) _mm_maskz_range_round_sd(U, A, B, C, _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_reduce_pd(A, B) __extension__ ({             \
+  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)-1, \
+                                           _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                           (__v8df)(__m512d)(W), \
+                                           (__mmask8)(U), \
+                                           _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_reduce_pd(U, A, B) __extension__ ({  \
+  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(U), \
+                                           _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_reduce_ps(A, B) __extension__ ({              \
+  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)-1, \
+                                          _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_reduce_ps(W, U, A, B) __extension__ ({   \
+  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                          (__v16sf)(__m512)(W), \
+                                          (__mmask16)(U), \
+                                          _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_reduce_ps(U, A, B) __extension__ ({       \
+  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(U), \
+                                          _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_reduce_round_pd(A, B, R) __extension__ ({\
+  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_reduce_round_pd(W, U, A, B, R) __extension__ ({\
+  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                           (__v8df)(__m512d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_reduce_round_pd(U, A, B, R) __extension__ ({\
+  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
+
+#define _mm512_reduce_round_ps(A, B, R) __extension__ ({\
+  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_reduce_round_ps(W, U, A, B, R) __extension__ ({\
+  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                          (__v16sf)(__m512)(W), \
+                                          (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_reduce_round_ps(U, A, B, R) __extension__ ({\
+  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(U), (int)(R)); })
+
+#define _mm_reduce_ss(A, B, C) __extension__ ({              \
+  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                       (__v4sf)(__m128)(B), \
+                                       (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
+                                       (int)(C), _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_reduce_ss(W, U, A, B, C) __extension__ ({   \
+  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                       (__v4sf)(__m128)(B), \
+                                       (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                       (int)(C), _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_reduce_ss(U, A, B, C) __extension__ ({       \
+  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                       (__v4sf)(__m128)(B), \
+                                       (__v4sf)_mm_setzero_ps(), \
+                                       (__mmask8)(U), (int)(C), \
+                                       _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_reduce_round_ss(A, B, C, R) __extension__ ({              \
+  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                       (__v4sf)(__m128)(B), \
+                                       (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
+                                       (int)(C), (int)(R)); })
+
+#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) __extension__ ({   \
+  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                       (__v4sf)(__m128)(B), \
+                                       (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                       (int)(C), (int)(R)); })
+
+#define _mm_maskz_reduce_round_ss(U, A, B, C, R) __extension__ ({       \
+  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                       (__v4sf)(__m128)(B), \
+                                       (__v4sf)_mm_setzero_ps(), \
+                                       (__mmask8)(U), (int)(C), (int)(R)); })
+
+#define _mm_reduce_sd(A, B, C) __extension__ ({              \
+  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                        (__v2df)(__m128d)(B), \
+                                        (__v2df)_mm_setzero_pd(), \
+                                        (__mmask8)-1, (int)(C), \
+                                        _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_reduce_sd(W, U, A, B, C) __extension__ ({   \
+  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                        (__v2df)(__m128d)(B), \
+                                        (__v2df)(__m128d)(W), (__mmask8)(U), \
+                                        (int)(C), _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_reduce_sd(U, A, B, C) __extension__ ({       \
+  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                        (__v2df)(__m128d)(B), \
+                                        (__v2df)_mm_setzero_pd(), \
+                                        (__mmask8)(U), (int)(C), \
+                                        _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_reduce_round_sd(A, B, C, R) __extension__ ({              \
+  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                        (__v2df)(__m128d)(B), \
+                                        (__v2df)_mm_setzero_pd(), \
+                                        (__mmask8)-1, (int)(C), (int)(R)); })
+
+#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) __extension__ ({   \
+  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                        (__v2df)(__m128d)(B), \
+                                        (__v2df)(__m128d)(W), (__mmask8)(U), \
+                                        (int)(C), (int)(R)); })
+
+#define _mm_maskz_reduce_round_sd(U, A, B, C, R) __extension__ ({       \
+  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                        (__v2df)(__m128d)(B), \
+                                        (__v2df)_mm_setzero_pd(), \
+                                        (__mmask8)(U), (int)(C), (int)(R)); })
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_movepi32_mask (__m512i __A)
+{
+  return (__mmask16) __builtin_ia32_cvtd2mask512 ((__v16si) __A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_movm_epi32 (__mmask16 __A)
+{
+  return (__m512i) __builtin_ia32_cvtmask2d512 (__A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_movm_epi64 (__mmask8 __A)
+{
+  return (__m512i) __builtin_ia32_cvtmask2q512 (__A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_movepi64_mask (__m512i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtq2mask512 ((__v8di) __A);
+}
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_broadcast_f32x2 (__m128 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
+                (__v16sf)_mm512_undefined_ps(),
+                (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
+                (__v16sf)
+                __O, __M);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
+                (__v16sf)_mm512_setzero_ps (),
+                __M);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_broadcast_f32x8 (__m256 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
+                _mm512_undefined_ps(),
+                (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_f32x8 (__m512 __O, __mmask16 __M, __m256 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
+                (__v16sf)__O,
+                __M);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_f32x8 (__mmask16 __M, __m256 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
+                (__v16sf)_mm512_setzero_ps (),
+                __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_broadcast_f64x2 (__m128d __A)
+{
+  return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A,
+                 (__v8df)_mm512_undefined_pd(),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_f64x2 (__m512d __O, __mmask8 __M, __m128d __A)
+{
+  return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A,
+                 (__v8df)
+                 __O, __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
+{
+  return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A,
+                 (__v8df)_mm512_setzero_pd (),
+                 __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcast_i32x2 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A,
+                 (__v16si)_mm512_setzero_si512(),
+                 (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A,
+                 (__v16si)
+                 __O, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A,
+                 (__v16si)_mm512_setzero_si512 (),
+                 __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcast_i32x8 (__m256i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A,
+                 (__v16si)_mm512_setzero_si512(),
+                 (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_i32x8 (__m512i __O, __mmask16 __M, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A,
+                 (__v16si)__O,
+                 __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_i32x8 (__mmask16 __M, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A,
+                 (__v16si)
+                 _mm512_setzero_si512 (),
+                 __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcast_i64x2 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A,
+                 (__v8di)_mm512_setzero_si512(),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_i64x2 (__m512i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A,
+                 (__v8di)
+                 __O, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A,
+                 (__v8di)_mm512_setzero_si512 (),
+                 __M);
+}
+
+#define _mm512_extractf32x8_ps(A, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)-1); })
+
+#define _mm512_mask_extractf32x8_ps(W, U, A, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                           (__v8sf)(__m256)(W), \
+                                           (__mmask8)(U)); })
+
+#define _mm512_maskz_extractf32x8_ps(U, A, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)(U)); })
+
+#define _mm512_extractf64x2_pd(A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+                                                (int)(imm), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)-1); })
+
+#define _mm512_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+                                                (int)(imm), \
+                                                (__v2df)(__m128d)(W), \
+                                                (__mmask8)(U)); })
+
+#define _mm512_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+                                                (int)(imm), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)(U)); })
+
+#define _mm512_extracti32x8_epi32(A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                            (__v8si)_mm256_setzero_si256(), \
+                                            (__mmask8)-1); })
+
+#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                            (__v8si)(__m256i)(W), \
+                                            (__mmask8)(U)); })
+
+#define _mm512_maskz_extracti32x8_epi32(U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                            (__v8si)_mm256_setzero_si256(), \
+                                            (__mmask8)(U)); })
+
+#define _mm512_extracti64x2_epi64(A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+                                                (int)(imm), \
+                                                (__v2di)_mm_setzero_di(), \
+                                                (__mmask8)-1); })
+
+#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+                                                (int)(imm), \
+                                                (__v2di)(__m128i)(W), \
+                                                (__mmask8)(U)); })
+
+#define _mm512_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+                                                (int)(imm), \
+                                                (__v2di)_mm_setzero_di(), \
+                                                (__mmask8)(U)); })
+
+#define _mm512_insertf32x8(A, B, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_insertf32x8_mask((__v16sf)(__m512)(A), \
+                                          (__v8sf)(__m256)(B), (int)(imm), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)-1); })
+
+#define _mm512_mask_insertf32x8(W, U, A, B, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_insertf32x8_mask((__v16sf)(__m512)(A), \
+                                          (__v8sf)(__m256)(B), (int)(imm), \
+                                          (__v16sf)(__m512)(W), \
+                                          (__mmask16)(U)); })
+
+#define _mm512_maskz_insertf32x8(U, A, B, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_insertf32x8_mask((__v16sf)(__m512)(A), \
+                                          (__v8sf)(__m256)(B), (int)(imm), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(U)); })
+
+#define _mm512_insertf64x2(A, B, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_insertf64x2_512_mask((__v8df)(__m512d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(imm), \
+                                               (__v8df)_mm512_setzero_pd(), \
+                                               (__mmask8)-1); })
+
+#define _mm512_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_insertf64x2_512_mask((__v8df)(__m512d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(imm), \
+                                               (__v8df)(__m512d)(W), \
+                                               (__mmask8)(U)); })
+
+#define _mm512_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_insertf64x2_512_mask((__v8df)(__m512d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(imm), \
+                                               (__v8df)_mm512_setzero_pd(), \
+                                               (__mmask8)(U)); })
+
+#define _mm512_inserti32x8(A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti32x8_mask((__v16si)(__m512i)(A), \
+                                           (__v8si)(__m256i)(B), (int)(imm), \
+                                           (__v16si)_mm512_setzero_si512(), \
+                                           (__mmask16)-1); })
+
+#define _mm512_mask_inserti32x8(W, U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti32x8_mask((__v16si)(__m512i)(A), \
+                                           (__v8si)(__m256i)(B), (int)(imm), \
+                                           (__v16si)(__m512i)(W), \
+                                           (__mmask16)(U)); })
+
+#define _mm512_maskz_inserti32x8(U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti32x8_mask((__v16si)(__m512i)(A), \
+                                           (__v8si)(__m256i)(B), (int)(imm), \
+                                           (__v16si)_mm512_setzero_si512(), \
+                                           (__mmask16)(U)); })
+
+#define _mm512_inserti64x2(A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti64x2_512_mask((__v8di)(__m512i)(A), \
+                                               (__v2di)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v8di)_mm512_setzero_si512(), \
+                                               (__mmask8)-1); })
+
+#define _mm512_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti64x2_512_mask((__v8di)(__m512i)(A), \
+                                               (__v2di)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v8di)(__m512i)(W), \
+                                               (__mmask8)(U)); })
+
+#define _mm512_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti64x2_512_mask((__v8di)(__m512i)(A), \
+                                               (__v2di)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v8di)_mm512_setzero_si512(), \
+                                               (__mmask8)(U)); })
+
+#define _mm512_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+  (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
+                                              (int)(imm), (__mmask16)(U)); })
+
+#define _mm512_fpclass_ps_mask(A, imm) __extension__ ({ \
+  (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
+                                              (int)(imm), (__mmask16)-1); })
+
+#define _mm512_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm512_fpclass_pd_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
+                                             (__mmask8)-1); })
+
+#define _mm_fpclass_sd_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
+                                          (__mmask8)-1); })
+
+#define _mm_mask_fpclass_sd_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
+                                          (__mmask8)(U)); })
+
+#define _mm_fpclass_ss_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                          (__mmask8)-1); })
+
+#define _mm_mask_fpclass_ss_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                          (__mmask8)(U)); })
 
 #undef __DEFAULT_FN_ATTRS
 
diff --git a/renderscript/clang-include/avx512erintrin.h b/renderscript/clang-include/avx512erintrin.h
index 40a9121..8ff212c 100644
--- a/renderscript/clang-include/avx512erintrin.h
+++ b/renderscript/clang-include/avx512erintrin.h
@@ -31,66 +31,66 @@
 #define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
                                       (__v8df)_mm512_setzero_pd(), \
-                                      (__mmask8)-1, (R)); })
+                                      (__mmask8)-1, (int)(R)); })
 
 #define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
-                                      (__v8df)(__m512d)(S), \
-                                      (__mmask8)(M), (R)); })
+                                      (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                      (int)(R)); })
 
 #define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
                                       (__v8df)_mm512_setzero_pd(), \
-                                      (__mmask8)(M), (R)); })
+                                      (__mmask8)(M), (int)(R)); })
 
 #define _mm512_exp2a23_pd(A) \
-   _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_mask_exp2a23_pd(S, M, A) \
-   _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_maskz_exp2a23_pd(M, A) \
-   _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \
   (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
                                      (__v16sf)_mm512_setzero_ps(), \
-                                     (__mmask8)-1, (R)); })
+                                     (__mmask16)-1, (int)(R)); })
 
 #define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
-                                     (__v16sf)(__m512)(S), \
-                                     (__mmask8)(M), (R)); })
+                                     (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                     (int)(R)); })
 
 #define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
                                      (__v16sf)_mm512_setzero_ps(), \
-                                     (__mmask8)(M), (R)); })
+                                     (__mmask16)(M), (int)(R)); })
 
 #define _mm512_exp2a23_ps(A) \
-   _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_mask_exp2a23_ps(S, M, A) \
-   _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_maskz_exp2a23_ps(M, A) \
-   _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 // rsqrt28
 #define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
                                          (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)-1, (R)); })
+                                         (__mmask8)-1, (int)(R)); })
 
 #define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(S), \
-                                         (__mmask8)(M), (R)); })
+                                         (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                         (int)(R)); })
 
 #define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
                                          (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)(M), (R)); })
+                                         (__mmask8)(M), (int)(R)); })
 
 #define _mm512_rsqrt28_pd(A) \
   _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -104,17 +104,17 @@
 #define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
                                         (__v16sf)_mm512_setzero_ps(), \
-                                        (__mmask16)-1, (R)); })
+                                        (__mmask16)-1, (int)(R)); })
 
 #define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
-                                        (__v16sf)(__m512)(S), \
-                                        (__mmask16)(M), (R)); })
+                                        (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                        (int)(R)); })
 
 #define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
                                         (__v16sf)_mm512_setzero_ps(), \
-                                        (__mmask16)(M), (R)); })
+                                        (__mmask16)(M), (int)(R)); })
 
 #define _mm512_rsqrt28_ps(A) \
   _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -126,22 +126,22 @@
   _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \
-  (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)_mm_setzero_ps(), \
-                                        (__mmask8)-1, (R)); })
+  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)-1, (int)(R)); })
 
 #define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \
-  (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)(__m128)(S), \
-                                        (__mmask8)(M), (R)); })
+  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)(__m128)(S), \
+                                              (__mmask8)(M), (int)(R)); })
 
 #define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \
-  (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)_mm_setzero_ps(), \
-                                        (__mmask8)(M), (R)); })
+  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)(M), (int)(R)); })
 
 #define _mm_rsqrt28_ss(A, B) \
   _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -153,22 +153,22 @@
   _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \
-  (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), \
-                                         (__v2df)_mm_setzero_pd(), \
-                                         (__mmask8)-1, (R)); })
+  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)-1, (int)(R)); })
 
 #define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \
-  (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), \
-                                         (__v2df)(__m128d)(S), \
-                                         (__mmask8)(M), (R)); })
+  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)(__m128d)(S), \
+                                               (__mmask8)(M), (int)(R)); })
 
 #define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \
-  (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), \
-                                         (__v2df)_mm_setzero_pd(), \
-                                         (__mmask8)(M), (R)); })
+  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)(M), (int)(R)); })
 
 #define _mm_rsqrt28_sd(A, B) \
   _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -177,23 +177,23 @@
   _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm_maskz_rsqrt28_sd(M, A, B) \
-  _mm_mask_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+  _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
 
 // rcp28
 #define _mm512_rcp28_round_pd(A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
                                        (__v8df)_mm512_setzero_pd(), \
-                                       (__mmask8)-1, (R)); })
+                                       (__mmask8)-1, (int)(R)); })
 
 #define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
-                                       (__v8df)(__m512d)(S), \
-                                       (__mmask8)(M), (R)); })
+                                       (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                       (int)(R)); })
 
 #define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
                                        (__v8df)_mm512_setzero_pd(), \
-                                       (__mmask8)(M), (R)); })
+                                       (__mmask8)(M), (int)(R)); })
 
 #define _mm512_rcp28_pd(A) \
   _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -207,17 +207,17 @@
 #define _mm512_rcp28_round_ps(A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
                                       (__v16sf)_mm512_setzero_ps(), \
-                                      (__mmask16)-1, (R)); })
+                                      (__mmask16)-1, (int)(R)); })
 
 #define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
-                                      (__v16sf)(__m512)(S), \
-                                      (__mmask16)(M), (R)); })
+                                      (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                      (int)(R)); })
 
 #define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
                                       (__v16sf)_mm512_setzero_ps(), \
-                                      (__mmask16)(M), (R)); })
+                                      (__mmask16)(M), (int)(R)); })
 
 #define _mm512_rcp28_ps(A) \
   _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -229,22 +229,22 @@
   _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \
-  (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
-                                      (__v4sf)(__m128)(B), \
-                                      (__v4sf)_mm_setzero_ps(), \
-                                      (__mmask8)-1, (R)); })
+  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)-1, (int)(R)); })
 
 #define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \
-  (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
-                                      (__v4sf)(__m128)(B), \
-                                      (__v4sf)(__m128)(S), \
-                                      (__mmask8)(M), (R)); })
+  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)(__m128)(S), \
+                                            (__mmask8)(M), (int)(R)); })
 
 #define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \
-  (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
-                                      (__v4sf)(__m128)(B), \
-                                      (__v4sf)_mm_setzero_ps(), \
-                                      (__mmask8)(M), (R)); })
+  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)(M), (int)(R)); })
 
 #define _mm_rcp28_ss(A, B) \
   _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -256,22 +256,22 @@
   _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \
-  (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
-                                       (__v2df)(__m128d)(B), \
-                                       (__v2df)_mm_setzero_pd(), \
-                                       (__mmask8)-1, (R)); })
+  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)-1, (int)(R)); })
 
 #define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \
-  (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
-                                       (__v2df)(__m128d)(B), \
-                                       (__v2df)(__m128d)(S), \
-                                       (__mmask8)(M), (R)); })
+  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)(__m128d)(S), \
+                                             (__mmask8)(M), (int)(R)); })
 
 #define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \
-  (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
-                                       (__v2df)(__m128d)(B), \
-                                       (__v2df)_mm_setzero_pd(), \
-                                       (__mmask8)(M), (R)); })
+  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)(M), (int)(R)); })
 
 #define _mm_rcp28_sd(A, B) \
   _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
diff --git a/renderscript/clang-include/avx512fintrin.h b/renderscript/clang-include/avx512fintrin.h
index 8dcdc71..badc436 100644
--- a/renderscript/clang-include/avx512fintrin.h
+++ b/renderscript/clang-include/avx512fintrin.h
@@ -27,11 +27,19 @@
 #ifndef __AVX512FINTRIN_H
 #define __AVX512FINTRIN_H
 
+typedef char __v64qi __attribute__((__vector_size__(64)));
+typedef short __v32hi __attribute__((__vector_size__(64)));
 typedef double __v8df __attribute__((__vector_size__(64)));
 typedef float __v16sf __attribute__((__vector_size__(64)));
 typedef long long __v8di __attribute__((__vector_size__(64)));
 typedef int __v16si __attribute__((__vector_size__(64)));
 
+/* Unsigned types */
+typedef unsigned char __v64qu __attribute__((__vector_size__(64)));
+typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
+typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
+typedef unsigned int __v16su __attribute__((__vector_size__(64)));
+
 typedef float __m512 __attribute__((__vector_size__(64)));
 typedef double __m512d __attribute__((__vector_size__(64)));
 typedef long long __m512i __attribute__((__vector_size__(64)));
@@ -46,6 +54,111 @@
 #define _MM_FROUND_TO_ZERO          0x03
 #define _MM_FROUND_CUR_DIRECTION    0x04
 
+typedef enum
+{
+  _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02,
+  _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05,
+  _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08,
+  _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B,
+  _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E,
+  _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11,
+  _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14,
+  _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17,
+  _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A,
+  _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D,
+  _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20,
+  _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23,
+  _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26,
+  _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29,
+  _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C,
+  _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F,
+  _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32,
+  _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35,
+  _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38,
+  _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B,
+  _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E,
+  _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41,
+  _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44,
+  _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47,
+  _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A,
+  _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D,
+  _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50,
+  _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53,
+  _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56,
+  _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59,
+  _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C,
+  _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F,
+  _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62,
+  _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65,
+  _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68,
+  _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B,
+  _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E,
+  _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71,
+  _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74,
+  _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77,
+  _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A,
+  _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D,
+  _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80,
+  _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83,
+  _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86,
+  _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89,
+  _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C,
+  _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F,
+  _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92,
+  _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95,
+  _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98,
+  _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B,
+  _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E,
+  _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1,
+  _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4,
+  _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7,
+  _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA,
+  _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD,
+  _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0,
+  _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3,
+  _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6,
+  _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9,
+  _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC,
+  _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF,
+  _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2,
+  _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5,
+  _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8,
+  _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB,
+  _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE,
+  _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1,
+  _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4,
+  _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7,
+  _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA,
+  _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD,
+  _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0,
+  _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3,
+  _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6,
+  _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9,
+  _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC,
+  _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF,
+  _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2,
+  _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5,
+  _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8,
+  _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB,
+  _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE,
+  _MM_PERM_DDDD = 0xFF
+} _MM_PERM_ENUM;
+
+typedef enum
+{
+  _MM_MANT_NORM_1_2,    /* interval [1, 2)      */
+  _MM_MANT_NORM_p5_2,   /* interval [0.5, 2)    */
+  _MM_MANT_NORM_p5_1,   /* interval [0.5, 1)    */
+  _MM_MANT_NORM_p75_1p5   /* interval [0.75, 1.5) */
+} _MM_MANTISSA_NORM_ENUM;
+
+typedef enum
+{
+  _MM_MANT_SIGN_src,    /* sign = sign(SRC)     */
+  _MM_MANT_SIGN_zero,   /* sign = 0             */
+  _MM_MANT_SIGN_nan   /* DEST = NaN if sign(SRC) = 1 */
+} _MM_MANTISSA_SIGN_ENUM;
+
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
 
@@ -57,30 +170,81 @@
   return (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
 }
 
+#define _mm512_setzero_epi32 _mm512_setzero_si512
+
 static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_undefined_pd()
+_mm512_undefined_pd(void)
 {
   return (__m512d)__builtin_ia32_undef512();
 }
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_undefined()
+_mm512_undefined(void)
 {
   return (__m512)__builtin_ia32_undef512();
 }
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_undefined_ps()
+_mm512_undefined_ps(void)
 {
   return (__m512)__builtin_ia32_undef512();
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_undefined_epi32()
+_mm512_undefined_epi32(void)
 {
   return (__m512i)__builtin_ia32_undef512();
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastd_epi32 (__m128i __A)
+{
+  return (__m512i)__builtin_shufflevector((__v4si) __A,
+                                          (__v4si)_mm_undefined_si128(),
+                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A)
+{
+  return (__m512i)__builtin_ia32_selectd_512(__M,
+                                             (__v16si) _mm512_broadcastd_epi32(__A),
+                                             (__v16si) __O);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)
+{
+  return (__m512i)__builtin_ia32_selectd_512(__M,
+                                             (__v16si) _mm512_broadcastd_epi32(__A),
+                                             (__v16si) _mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastq_epi64 (__m128i __A)
+{
+  return (__m512i)__builtin_shufflevector((__v2di) __A,
+                                          (__v2di) _mm_undefined_si128(),
+                                          0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m512i)__builtin_ia32_selectq_512(__M,
+                                             (__v8di) _mm512_broadcastq_epi64(__A),
+                                             (__v8di) __O);
+
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
+{
+  return (__m512i)__builtin_ia32_selectq_512(__M,
+                                             (__v8di) _mm512_broadcastq_epi64(__A),
+                                             (__v8di) _mm512_setzero_si512());
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_set1_epi32(__mmask16 __M, int __A)
 {
@@ -112,6 +276,9 @@
   return (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
 }
+
+#define _mm512_setzero _mm512_setzero_ps
+
 static  __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_setzero_pd(void)
 {
@@ -132,6 +299,28 @@
 }
 
 static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set1_epi8(char __w)
+{
+  return (__m512i)(__v64qi){ __w, __w, __w, __w, __w, __w, __w, __w,
+                             __w, __w, __w, __w, __w, __w, __w, __w,
+                             __w, __w, __w, __w, __w, __w, __w, __w,
+                             __w, __w, __w, __w, __w, __w, __w, __w,
+                             __w, __w, __w, __w, __w, __w, __w, __w,
+                             __w, __w, __w, __w, __w, __w, __w, __w,
+                             __w, __w, __w, __w, __w, __w, __w, __w,
+                             __w, __w, __w, __w, __w, __w, __w, __w  };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set1_epi16(short __w)
+{
+  return (__m512i)(__v32hi){ __w, __w, __w, __w, __w, __w, __w, __w,
+                             __w, __w, __w, __w, __w, __w, __w, __w,
+                             __w, __w, __w, __w, __w, __w, __w, __w,
+                             __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_set1_epi32(int __s)
 {
   return (__m512i)(__v16si){ __s, __s, __s, __s, __s, __s, __s, __s,
@@ -145,21 +334,62 @@
 }
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_broadcastss_ps(__m128 __X)
+_mm512_broadcastss_ps(__m128 __A)
 {
-  float __f = __X[0];
-  return (__v16sf){ __f, __f, __f, __f,
-                    __f, __f, __f, __f,
-                    __f, __f, __f, __f,
-                    __f, __f, __f, __f };
+  return (__m512)__builtin_shufflevector((__v4sf) __A,
+                                         (__v4sf)_mm_undefined_ps(),
+                                         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
 }
 
-static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_broadcastsd_pd(__m128d __X)
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
 {
-  double __d = __X[0];
-  return (__v8df){ __d, __d, __d, __d,
-                   __d, __d, __d, __d };
+  return  (__m512i)(__v16si)
+   { __D, __C, __B, __A, __D, __C, __B, __A,
+     __D, __C, __B, __A, __D, __C, __B, __A };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set4_epi64 (long long __A, long long __B, long long __C,
+       long long __D)
+{
+  return  (__m512i) (__v8di)
+   { __D, __C, __B, __A, __D, __C, __B, __A };
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_set4_pd (double __A, double __B, double __C, double __D)
+{
+  return  (__m512d)
+   { __D, __C, __B, __A, __D, __C, __B, __A };
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_set4_ps (float __A, float __B, float __C, float __D)
+{
+  return  (__m512)
+   { __D, __C, __B, __A, __D, __C, __B, __A,
+     __D, __C, __B, __A, __D, __C, __B, __A };
+}
+
+#define _mm512_setr4_epi32(e0,e1,e2,e3)               \
+  _mm512_set4_epi32((e3),(e2),(e1),(e0))
+
+#define _mm512_setr4_epi64(e0,e1,e2,e3)               \
+  _mm512_set4_epi64((e3),(e2),(e1),(e0))
+
+#define _mm512_setr4_pd(e0,e1,e2,e3)                \
+  _mm512_set4_pd((e3),(e2),(e1),(e0))
+
+#define _mm512_setr4_ps(e0,e1,e2,e3)                \
+  _mm512_set4_ps((e3),(e2),(e1),(e0))
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_broadcastsd_pd(__m128d __A)
+{
+  return (__m512d)__builtin_shufflevector((__v2df) __A,
+                                          (__v2df) _mm_undefined_pd(),
+                                          0, 0, 0, 0, 0, 0, 0, 0);
 }
 
 /* Cast between vector types */
@@ -183,272 +413,327 @@
   return __builtin_shufflevector(__a, __a, 0, 1);
 }
 
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm512_castpd512_pd256 (__m512d __A)
+{
+  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3);
+}
+
 static __inline __m128 __DEFAULT_FN_ATTRS
 _mm512_castps512_ps128(__m512 __a)
 {
   return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
 }
 
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm512_castps512_ps256 (__m512 __A)
+{
+  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_castpd_ps (__m512d __A)
+{
+  return (__m512) (__A);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_castpd_si512 (__m512d __A)
+{
+  return (__m512i) (__A);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_castpd128_pd512 (__m128d __A)
+{
+  return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_castps_pd (__m512 __A)
+{
+  return (__m512d) (__A);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_castps_si512 (__m512 __A)
+{
+  return (__m512i) (__A);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_castps128_ps512 (__m128 __A)
+{
+    return  __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_castsi128_si512 (__m128i __A)
+{
+   return  __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_castsi256_si512 (__m256i __A)
+{
+   return  __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_castsi512_ps (__m512i __A)
+{
+  return (__m512) (__A);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_castsi512_pd (__m512i __A)
+{
+  return (__m512d) (__A);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm512_castsi512_si128 (__m512i __A)
+{
+  return (__m128i)__builtin_shufflevector(__A, __A , 0, 1);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm512_castsi512_si256 (__m512i __A)
+{
+  return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3);
+}
+
 /* Bitwise operators */
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_and_epi32(__m512i __a, __m512i __b)
 {
-  return __a & __b;
+  return (__m512i)((__v16su)__a & (__v16su)__b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_pandd512_mask((__v16si) __a,
-              (__v16si) __b,
-              (__v16si) __src,
-              (__mmask16) __k);
+  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
+                (__v16si) _mm512_and_epi32(__a, __b),
+                (__v16si) __src);
 }
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_pandd512_mask((__v16si) __a,
-              (__v16si) __b,
-              (__v16si)
-              _mm512_setzero_si512 (),
-              (__mmask16) __k);
+  return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (),
+                                         __k, __a, __b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_and_epi64(__m512i __a, __m512i __b)
 {
-  return __a & __b;
+  return (__m512i)((__v8du)__a & (__v8du)__b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __a,
-              (__v8di) __b,
-              (__v8di) __src,
-              (__mmask8) __k);
+    return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k,
+                (__v8di) _mm512_and_epi64(__a, __b),
+                (__v8di) __src);
 }
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __a,
-              (__v8di) __b,
-              (__v8di)
-              _mm512_setzero_si512 (),
-              (__mmask8) __k);
+  return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (),
+                                         __k, __a, __b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_andnot_si512 (__m512i __A, __m512i __B)
+{
+  return (__m512i)(~(__v8du)(__A) & (__v8du)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_andnot_epi32 (__m512i __A, __m512i __B)
 {
-  return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A,
-              (__v16si) __B,
-              (__v16si)
-              _mm512_setzero_si512 (),
-              (__mmask16) -1);
+  return (__m512i)(~(__v16su)(__A) & (__v16su)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_andnot_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
 {
-  return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A,
-              (__v16si) __B,
-              (__v16si) __W,
-              (__mmask16) __U);
+  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+                                         (__v16si)_mm512_andnot_epi32(__A, __B),
+                                         (__v16si)__W);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_andnot_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
 {
-  return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A,
-              (__v16si) __B,
-              (__v16si)
-              _mm512_setzero_si512 (),
-              (__mmask16) __U);
+  return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(),
+                                           __U, __A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_andnot_epi64 (__m512i __A, __m512i __B)
+_mm512_andnot_epi64(__m512i __A, __m512i __B)
 {
-  return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A,
-              (__v8di) __B,
-              (__v8di)
-              _mm512_setzero_si512 (),
-              (__mmask8) -1);
+  return (__m512i)(~(__v8du)(__A) & (__v8du)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_andnot_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
 {
-  return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A,
-              (__v8di) __B,
-              (__v8di) __W, __U);
+  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+                                          (__v8di)_mm512_andnot_epi64(__A, __B),
+                                          (__v8di)__W);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_andnot_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
 {
-  return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A,
-              (__v8di) __B,
-              (__v8di)
-              _mm512_setzero_pd (),
-              __U);
+  return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(),
+                                           __U, __A, __B);
 }
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_or_epi32(__m512i __a, __m512i __b)
 {
-  return __a | __b;
+  return (__m512i)((__v16su)__a | (__v16su)__b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_pord512_mask((__v16si) __a,
-              (__v16si) __b,
-              (__v16si) __src,
-              (__mmask16) __k);
+  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
+                                             (__v16si)_mm512_or_epi32(__a, __b),
+                                             (__v16si)__src);
 }
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_pord512_mask((__v16si) __a,
-              (__v16si) __b,
-              (__v16si)
-              _mm512_setzero_si512 (),
-              (__mmask16) __k);
+  return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_or_epi64(__m512i __a, __m512i __b)
 {
-  return __a | __b;
+  return (__m512i)((__v8du)__a | (__v8du)__b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __a,
-              (__v8di) __b,
-              (__v8di) __src,
-              (__mmask8) __k);
+  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
+                                             (__v8di)_mm512_or_epi64(__a, __b),
+                                             (__v8di)__src);
 }
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __a,
-              (__v8di) __b,
-              (__v8di)
-              _mm512_setzero_si512 (),
-              (__mmask8) __k);
+  return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_xor_epi32(__m512i __a, __m512i __b)
 {
-  return __a ^ __b;
+  return (__m512i)((__v16su)__a ^ (__v16su)__b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_pxord512_mask((__v16si) __a,
-              (__v16si) __b,
-              (__v16si) __src,
-              (__mmask16) __k);
+  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
+                                            (__v16si)_mm512_xor_epi32(__a, __b),
+                                            (__v16si)__src);
 }
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_pxord512_mask((__v16si) __a,
-              (__v16si) __b,
-              (__v16si)
-              _mm512_setzero_si512 (),
-              (__mmask16) __k);
+  return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_xor_epi64(__m512i __a, __m512i __b)
 {
-  return __a ^ __b;
+  return (__m512i)((__v8du)__a ^ (__v8du)__b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __a,
-              (__v8di) __b,
-              (__v8di) __src,
-              (__mmask8) __k);
+  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
+                                             (__v8di)_mm512_xor_epi64(__a, __b),
+                                             (__v8di)__src);
 }
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
 {
-  return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __a,
-              (__v8di) __b,
-              (__v8di)
-              _mm512_setzero_si512 (),
-              (__mmask8) __k);
+  return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_and_si512(__m512i __a, __m512i __b)
 {
-  return __a & __b;
+  return (__m512i)((__v8du)__a & (__v8du)__b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_or_si512(__m512i __a, __m512i __b)
 {
-  return __a | __b;
+  return (__m512i)((__v8du)__a | (__v8du)__b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_xor_si512(__m512i __a, __m512i __b)
 {
-  return __a ^ __b;
+  return (__m512i)((__v8du)__a ^ (__v8du)__b);
 }
+
 /* Arithmetic */
 
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_add_pd(__m512d __a, __m512d __b)
 {
-  return __a + __b;
+  return (__m512d)((__v8df)__a + (__v8df)__b);
 }
 
 static __inline __m512 __DEFAULT_FN_ATTRS
 _mm512_add_ps(__m512 __a, __m512 __b)
 {
-  return __a + __b;
+  return (__m512)((__v16sf)__a + (__v16sf)__b);
 }
 
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_mul_pd(__m512d __a, __m512d __b)
 {
-  return __a * __b;
+  return (__m512d)((__v8df)__a * (__v8df)__b);
 }
 
 static __inline __m512 __DEFAULT_FN_ATTRS
 _mm512_mul_ps(__m512 __a, __m512 __b)
 {
-  return __a * __b;
+  return (__m512)((__v16sf)__a * (__v16sf)__b);
 }
 
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_sub_pd(__m512d __a, __m512d __b)
 {
-  return __a - __b;
+  return (__m512d)((__v8df)__a - (__v8df)__b);
 }
 
 static __inline __m512 __DEFAULT_FN_ATTRS
 _mm512_sub_ps(__m512 __a, __m512 __b)
 {
-  return __a - __b;
+  return (__m512)((__v16sf)__a - (__v16sf)__b);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_add_epi64 (__m512i __A, __m512i __B)
 {
-  return (__m512i) ((__v8di) __A + (__v8di) __B);
+  return (__m512i) ((__v8du) __A + (__v8du) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -473,7 +758,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_sub_epi64 (__m512i __A, __m512i __B)
 {
-  return (__m512i) ((__v8di) __A - (__v8di) __B);
+  return (__m512i) ((__v8du) __A - (__v8du) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -498,7 +783,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_add_epi32 (__m512i __A, __m512i __B)
 {
-  return (__m512i) ((__v16si) __A + (__v16si) __B);
+  return (__m512i) ((__v16su) __A + (__v16su) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -523,7 +808,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_sub_epi32 (__m512i __A, __m512i __B)
 {
-  return (__m512i) ((__v16si) __A - (__v16si) __B);
+  return (__m512i) ((__v16su) __A - (__v16su) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -545,6 +830,24 @@
              (__mmask16) __U);
 }
 
+#define _mm512_mask_max_round_pd(W, U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                        (int)(R)); })
+
+#define _mm512_maskz_max_round_pd(U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)(U), (int)(R)); })
+
+#define _mm512_max_round_pd(A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_undefined_pd(), \
+                                        (__mmask8)-1, (int)(R)); })
+
 static  __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_max_pd(__m512d __A, __m512d __B)
 {
@@ -556,6 +859,45 @@
              _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
+                  (__v8df) __B,
+                  (__v8df) __W,
+                  (__mmask8) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
+                  (__v8df) __B,
+                  (__v8df)
+                  _mm512_setzero_pd (),
+                  (__mmask8) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_max_round_ps(W, U, A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                       (int)(R)); })
+
+#define _mm512_maskz_max_round_ps(U, A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)(U), (int)(R)); })
+
+#define _mm512_max_round_ps(A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_undefined_ps(), \
+                                       (__mmask16)-1, (int)(R)); })
+
 static  __inline__ __m512 __DEFAULT_FN_ATTRS
 _mm512_max_ps(__m512 __A, __m512 __B)
 {
@@ -567,9 +909,30 @@
             _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
+                 (__v16sf) __B,
+                 (__v16sf) __W,
+                 (__mmask16) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
+                 (__v16sf) __B,
+                 (__v16sf)
+                 _mm512_setzero_ps (),
+                 (__mmask16) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf) __W,
                 (__mmask8) __U,
@@ -578,28 +941,34 @@
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf)  _mm_setzero_ps (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm_max_round_ss(__A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+#define _mm_max_round_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_max_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_max_round_ss(W, U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                          (int)(R)); })
 
-#define _mm_maskz_max_round_ss(__U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  _mm_setzero_ps(), (__mmask8) __U,__R); })
+#define _mm_maskz_max_round_ss(U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df) __W,
                 (__mmask8) __U,
@@ -608,24 +977,30 @@
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df)  _mm_setzero_pd (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm_max_round_sd(__A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+#define _mm_max_round_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_max_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_max_round_sd(W, U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)(__m128d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
 
-#define _mm_maskz_max_round_sd(__U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  _mm_setzero_pd(), (__mmask8) __U,__R); })
+#define _mm_maskz_max_round_sd(U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
 
 static __inline __m512i
 __DEFAULT_FN_ATTRS
@@ -638,6 +1013,24 @@
               (__mmask16) -1);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
+                   (__v16si) __B,
+                   (__v16si) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
+                   (__v16si) __B,
+                   (__v16si)
+                   _mm512_setzero_si512 (),
+                   __M);
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_max_epu32(__m512i __A, __m512i __B)
 {
@@ -648,6 +1041,24 @@
               (__mmask16) -1);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
+                   (__v16si) __B,
+                   (__v16si) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
+                   (__v16si) __B,
+                   (__v16si)
+                   _mm512_setzero_si512 (),
+                   __M);
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_max_epi64(__m512i __A, __m512i __B)
 {
@@ -658,6 +1069,24 @@
               (__mmask8) -1);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
+                   (__v8di) __B,
+                   (__v8di) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
+                   (__v8di) __B,
+                   (__v8di)
+                   _mm512_setzero_si512 (),
+                   __M);
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_max_epu64(__m512i __A, __m512i __B)
 {
@@ -668,6 +1097,42 @@
               (__mmask8) -1);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
+                   (__v8di) __B,
+                   (__v8di) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
+                   (__v8di) __B,
+                   (__v8di)
+                   _mm512_setzero_si512 (),
+                   __M);
+}
+
+#define _mm512_mask_min_round_pd(W, U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                        (int)(R)); })
+
+#define _mm512_maskz_min_round_pd(U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)(U), (int)(R)); })
+
+#define _mm512_min_round_pd(A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_undefined_pd(), \
+                                        (__mmask8)-1, (int)(R)); })
+
 static  __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_min_pd(__m512d __A, __m512d __B)
 {
@@ -679,6 +1144,45 @@
              _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
+                  (__v8df) __B,
+                  (__v8df) __W,
+                  (__mmask8) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_min_round_ps(W, U, A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                       (int)(R)); })
+
+#define _mm512_maskz_min_round_ps(U, A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)(U), (int)(R)); })
+
+#define _mm512_min_round_ps(A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_undefined_ps(), \
+                                       (__mmask16)-1, (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
+                  (__v8df) __B,
+                  (__v8df)
+                  _mm512_setzero_pd (),
+                  (__mmask8) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
 static  __inline__ __m512 __DEFAULT_FN_ATTRS
 _mm512_min_ps(__m512 __A, __m512 __B)
 {
@@ -690,9 +1194,30 @@
             _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
+                 (__v16sf) __B,
+                 (__v16sf) __W,
+                 (__mmask16) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
+                 (__v16sf) __B,
+                 (__v16sf)
+                 _mm512_setzero_ps (),
+                 (__mmask16) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf) __W,
                 (__mmask8) __U,
@@ -701,28 +1226,34 @@
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf)  _mm_setzero_ps (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm_min_round_ss(__A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+#define _mm_min_round_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_min_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_min_round_ss(W, U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                          (int)(R)); })
 
-#define _mm_maskz_min_round_ss(__U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  _mm_setzero_ps(), (__mmask8) __U,__R); })
+#define _mm_maskz_min_round_ss(U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df) __W,
                 (__mmask8) __U,
@@ -731,24 +1262,30 @@
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df)  _mm_setzero_pd (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm_min_round_sd(__A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+#define _mm_min_round_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_min_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_min_round_sd(W, U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)(__m128d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
 
-#define _mm_maskz_min_round_sd(__U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  _mm_setzero_pd(), (__mmask8) __U,__R); })
+#define _mm_maskz_min_round_sd(U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
 
 static __inline __m512i
 __DEFAULT_FN_ATTRS
@@ -761,6 +1298,24 @@
               (__mmask16) -1);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
+                   (__v16si) __B,
+                   (__v16si) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
+                   (__v16si) __B,
+                   (__v16si)
+                   _mm512_setzero_si512 (),
+                   __M);
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_min_epu32(__m512i __A, __m512i __B)
 {
@@ -771,6 +1326,24 @@
               (__mmask16) -1);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
+                   (__v16si) __B,
+                   (__v16si) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
+                   (__v16si) __B,
+                   (__v16si)
+                   _mm512_setzero_si512 (),
+                   __M);
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_min_epi64(__m512i __A, __m512i __B)
 {
@@ -781,6 +1354,24 @@
               (__mmask8) -1);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
+                   (__v8di) __B,
+                   (__v8di) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
+                   (__v8di) __B,
+                   (__v8di)
+                   _mm512_setzero_si512 (),
+                   __M);
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_min_epu64(__m512i __A, __m512i __B)
 {
@@ -791,6 +1382,24 @@
               (__mmask8) -1);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
+                   (__v8di) __B,
+                   (__v8di) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
+                   (__v8di) __B,
+                   (__v8di)
+                   _mm512_setzero_si512 (),
+                   __M);
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_mul_epi32(__m512i __X, __m512i __Y)
 {
@@ -850,7 +1459,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_mullo_epi32 (__m512i __A, __m512i __B)
 {
-  return (__m512i) ((__v16si) __A * (__v16si) __B);
+  return (__m512i) ((__v16su) __A * (__v16su) __B);
 }
 
 static __inline __m512i __DEFAULT_FN_ATTRS
@@ -871,6 +1480,21 @@
               (__v16si) __W, __M);
 }
 
+#define _mm512_mask_sqrt_round_pd(W, U, A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
+                                         (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                         (int)(R)); })
+
+#define _mm512_maskz_sqrt_round_pd(U, A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
+                                         (__v8df)_mm512_setzero_pd(), \
+                                         (__mmask8)(U), (int)(R)); })
+
+#define _mm512_sqrt_round_pd(A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
+                                         (__v8df)_mm512_undefined_pd(), \
+                                         (__mmask8)-1, (int)(R)); })
+
 static  __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_sqrt_pd(__m512d __a)
 {
@@ -880,6 +1504,40 @@
                                                 _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A,
+                   (__v8df) __W,
+                   (__mmask8) __U,
+                   _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A,
+                   (__v8df)
+                   _mm512_setzero_pd (),
+                   (__mmask8) __U,
+                   _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_sqrt_round_ps(W, U, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
+                                        (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                        (int)(R)); })
+
+#define _mm512_maskz_sqrt_round_ps(U, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
+                                        (__v16sf)_mm512_setzero_ps(), \
+                                        (__mmask16)(U), (int)(R)); })
+
+#define _mm512_sqrt_round_ps(A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
+                                        (__v16sf)_mm512_undefined_ps(), \
+                                        (__mmask16)-1, (int)(R)); })
+
 static  __inline__ __m512 __DEFAULT_FN_ATTRS
 _mm512_sqrt_ps(__m512 __a)
 {
@@ -889,6 +1547,24 @@
                                                _MM_FROUND_CUR_DIRECTION);
 }
 
+static  __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A,
+                                               (__v16sf) __W,
+                                               (__mmask16) __U,
+                                               _MM_FROUND_CUR_DIRECTION);
+}
+
+static  __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
+{
+  return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A,
+                                               (__v16sf) _mm512_setzero_ps (),
+                                               (__mmask16) __U,
+                                               _MM_FROUND_CUR_DIRECTION);
+}
+
 static  __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_rsqrt14_pd(__m512d __A)
 {
@@ -897,6 +1573,23 @@
                  _mm512_setzero_pd (),
                  (__mmask8) -1);}
 
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
+                  (__v8df) __W,
+                  (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
+                  (__v8df)
+                  _mm512_setzero_pd (),
+                  (__mmask8) __U);
+}
+
 static  __inline__ __m512 __DEFAULT_FN_ATTRS
 _mm512_rsqrt14_ps(__m512 __A)
 {
@@ -906,26 +1599,79 @@
                 (__mmask16) -1);
 }
 
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
+                 (__v16sf) __W,
+                 (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
+                 (__v16sf)
+                 _mm512_setzero_ps (),
+                 (__mmask16) __U);
+}
+
 static  __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_rsqrt14_ss(__m128 __A, __m128 __B)
 {
-  return (__m128) __builtin_ia32_rsqrt14ss ((__v4sf) __A,
+  return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
              (__v4sf) __B,
              (__v4sf)
              _mm_setzero_ps (),
              (__mmask8) -1);
 }
 
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) __W,
+          (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) _mm_setzero_ps (),
+          (__mmask8) __U);
+}
+
 static  __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_rsqrt14_sd(__m128d __A, __m128d __B)
 {
-  return (__m128d) __builtin_ia32_rsqrt14sd ((__v2df) __A,
+  return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
               (__v2df) __B,
               (__v2df)
               _mm_setzero_pd (),
               (__mmask8) -1);
 }
 
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __W,
+          (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) _mm_setzero_pd (),
+          (__mmask8) __U);
+}
+
 static  __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_rcp14_pd(__m512d __A)
 {
@@ -935,6 +1681,23 @@
                (__mmask8) -1);
 }
 
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
+                (__v8df) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
+                (__v8df)
+                _mm512_setzero_pd (),
+                (__mmask8) __U);
+}
+
 static  __inline__ __m512 __DEFAULT_FN_ATTRS
 _mm512_rcp14_ps(__m512 __A)
 {
@@ -943,26 +1706,80 @@
               _mm512_setzero_ps (),
               (__mmask16) -1);
 }
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
+                   (__v16sf) __W,
+                   (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
+                   (__v16sf)
+                   _mm512_setzero_ps (),
+                   (__mmask16) __U);
+}
+
 static  __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_rcp14_ss(__m128 __A, __m128 __B)
 {
-  return (__m128) __builtin_ia32_rcp14ss ((__v4sf) __A,
+  return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
                  (__v4sf) __B,
                  (__v4sf)
                  _mm_setzero_ps (),
                  (__mmask8) -1);
 }
 
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) __W,
+          (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) _mm_setzero_ps (),
+          (__mmask8) __U);
+}
+
 static  __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_rcp14_sd(__m128d __A, __m128d __B)
 {
-  return (__m128d) __builtin_ia32_rcp14sd ((__v2df) __A,
+  return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
             (__v2df) __B,
             (__v2df)
             _mm_setzero_pd (),
             (__mmask8) -1);
 }
 
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __W,
+          (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) _mm_setzero_pd (),
+          (__mmask8) __U);
+}
+
 static __inline __m512 __DEFAULT_FN_ATTRS
 _mm512_floor_ps(__m512 __A)
 {
@@ -972,6 +1789,15 @@
                                                   _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+                   _MM_FROUND_FLOOR,
+                   (__v16sf) __W, __U,
+                   _MM_FROUND_CUR_DIRECTION);
+}
+
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_floor_pd(__m512d __A)
 {
@@ -981,6 +1807,24 @@
                                                    _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+                _MM_FROUND_FLOOR,
+                (__v8df) __W, __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+                   _MM_FROUND_CEIL,
+                   (__v16sf) __W, __U,
+                   _MM_FROUND_CUR_DIRECTION);
+}
+
 static __inline __m512 __DEFAULT_FN_ATTRS
 _mm512_ceil_ps(__m512 __A)
 {
@@ -999,6 +1843,15 @@
                                                    _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+                _MM_FROUND_CEIL,
+                (__v8df) __W, __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_abs_epi64(__m512i __A)
 {
@@ -1008,6 +1861,23 @@
              (__mmask8) -1);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
+                  (__v8di) __W,
+                  (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
+                  (__v8di)
+                  _mm512_setzero_si512 (),
+                  (__mmask8) __U);
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_abs_epi32(__m512i __A)
 {
@@ -1017,9 +1887,26 @@
              (__mmask16) -1);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
+                  (__v16si) __W,
+                  (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
+                  (__v16si)
+                  _mm512_setzero_si512 (),
+                  (__mmask16) __U);
+}
+
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_addss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf) __W,
                 (__mmask8) __U,
@@ -1028,28 +1915,34 @@
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_addss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf)  _mm_setzero_ps (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm_add_round_ss(__A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+#define _mm_add_round_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_add_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_add_round_ss(W, U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                          (int)(R)); })
 
-#define _mm_maskz_add_round_ss(__U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  _mm_setzero_ps(), (__mmask8) __U,__R); })
+#define _mm_maskz_add_round_ss(U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_addsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df) __W,
                 (__mmask8) __U,
@@ -1058,23 +1951,29 @@
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_addsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df)  _mm_setzero_pd (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
-#define _mm_add_round_sd(__A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+#define _mm_add_round_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_add_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_add_round_sd(W, U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)(__m128d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
 
-#define _mm_maskz_add_round_sd(__U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  _mm_setzero_pd(), (__mmask8) __U,__R); })
+#define _mm_maskz_add_round_sd(U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -1112,33 +2011,45 @@
             _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_add_round_pd(__A, __B, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, (__v8df) __B, \
-               (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+#define _mm512_add_round_pd(A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_add_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_addpd512_mask((__v8df) __A, (__v8df) __B, \
-                (__v8df) __W, (__mmask8) __U, __R); })
+#define _mm512_mask_add_round_pd(W, U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                        (int)(R)); })
 
-#define _mm512_maskz_add_round_pd(__U, __A, __B, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, (__v8df) __B, \
-                (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R); })
+#define _mm512_maskz_add_round_pd(U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)(U), (int)(R)); })
 
-#define _mm512_add_round_ps(__A, __B, __R) __extension__ ({ \
-  (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
-                (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R); })
+#define _mm512_add_round_ps(A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)-1, (int)(R)); })
 
-#define _mm512_mask_add_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
-                (__v16sf) __W, (__mmask16)__U, __R); })
+#define _mm512_mask_add_round_ps(W, U, A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                       (int)(R)); })
 
-#define _mm512_maskz_add_round_ps(__U, __A, __B, __R) __extension__ ({ \
-  (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
-                (__v16sf) _mm512_setzero_ps(), (__mmask16)__U, __R); })
+#define _mm512_maskz_add_round_ps(U, A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)(U), (int)(R)); })
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_subss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf) __W,
                 (__mmask8) __U,
@@ -1147,27 +2058,33 @@
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_subss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf)  _mm_setzero_ps (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
-#define _mm_sub_round_ss(__A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+#define _mm_sub_round_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_sub_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_sub_round_ss(W, U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                          (int)(R)); })
 
-#define _mm_maskz_sub_round_ss(__U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  _mm_setzero_ps(), (__mmask8) __U,__R); })
+#define _mm_maskz_sub_round_ss(U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_subsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df) __W,
                 (__mmask8) __U,
@@ -1176,24 +2093,30 @@
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_subsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df)  _mm_setzero_pd (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm_sub_round_sd(__A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+#define _mm_sub_round_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_sub_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_sub_round_sd(W, U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)(__m128d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
 
-#define _mm_maskz_sub_round_sd(__U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  _mm_setzero_pd(), (__mmask8) __U,__R); })
+#define _mm_maskz_sub_round_sd(U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -1233,33 +2156,45 @@
             _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_sub_round_pd(__A, __B, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B,\
-             (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+#define _mm512_sub_round_pd(A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_sub_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B, \
-             (__v8df) __W, (__mmask8) __U, __R); })
+#define _mm512_mask_sub_round_pd(W, U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                        (int)(R)); })
 
-#define _mm512_maskz_sub_round_pd(__U, __A, __B, __R) __extension__ ({ \
-   (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B, \
-             (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+#define _mm512_maskz_sub_round_pd(U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)(U), (int)(R)); })
 
-#define _mm512_sub_round_ps(__A, __B, __R) __extension__ ({ \
-  (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
-            (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+#define _mm512_sub_round_ps(A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)-1, (int)(R)); })
 
-#define _mm512_mask_sub_round_ps(__W, __U, __A, __B, __R)  __extension__ ({ \
-  (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
-            (__v16sf) __W, (__mmask16) __U, __R); });
+#define _mm512_mask_sub_round_ps(W, U, A, B, R)  __extension__ ({ \
+  (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                       (int)(R)); })
 
-#define _mm512_maskz_sub_round_ps(__U, __A, __B, __R)  __extension__ ({ \
-  (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
-            (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R);});
+#define _mm512_maskz_sub_round_ps(U, A, B, R)  __extension__ ({ \
+  (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)(U), (int)(R)); })
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_mulss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf) __W,
                 (__mmask8) __U,
@@ -1268,27 +2203,33 @@
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_mulss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf)  _mm_setzero_ps (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
-#define _mm_mul_round_ss(__A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+#define _mm_mul_round_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_mul_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_mul_round_ss(W, U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                          (int)(R)); })
 
-#define _mm_maskz_mul_round_ss(__U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  _mm_setzero_ps(), (__mmask8) __U,__R); })
+#define _mm_maskz_mul_round_ss(U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_mulsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df) __W,
                 (__mmask8) __U,
@@ -1297,24 +2238,30 @@
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_mulsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df)  _mm_setzero_pd (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm_mul_round_sd(__A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+#define _mm_mul_round_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_mul_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_mul_round_sd(W, U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)(__m128d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
 
-#define _mm_maskz_mul_round_sd(__U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  _mm_setzero_pd(), (__mmask8) __U,__R); })
+#define _mm_maskz_mul_round_sd(U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -1354,33 +2301,45 @@
             _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_mul_round_pd(__A, __B, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B,\
-             (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+#define _mm512_mul_round_pd(A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_mul_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B, \
-             (__v8df) __W, (__mmask8) __U, __R); })
+#define _mm512_mask_mul_round_pd(W, U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                        (int)(R)); })
 
-#define _mm512_maskz_mul_round_pd(__U, __A, __B, __R) __extension__ ({ \
-   (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B, \
-             (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+#define _mm512_maskz_mul_round_pd(U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)(U), (int)(R)); })
 
-#define _mm512_mul_round_ps(__A, __B, __R) __extension__ ({ \
-  (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
-            (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+#define _mm512_mul_round_ps(A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)-1, (int)(R)); })
 
-#define _mm512_mask_mul_round_ps(__W, __U, __A, __B, __R)  __extension__ ({ \
-  (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
-            (__v16sf) __W, (__mmask16) __U, __R); });
+#define _mm512_mask_mul_round_ps(W, U, A, B, R)  __extension__ ({ \
+  (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                       (int)(R)); })
 
-#define _mm512_maskz_mul_round_ps(__U, __A, __B, __R)  __extension__ ({ \
-  (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
-            (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R);});
+#define _mm512_maskz_mul_round_ps(U, A, B, R)  __extension__ ({ \
+  (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)(U), (int)(R)); })
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_divss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf) __W,
                 (__mmask8) __U,
@@ -1389,28 +2348,34 @@
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
+  return (__m128) __builtin_ia32_divss_round_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf)  _mm_setzero_ps (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm_div_round_ss(__A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+#define _mm_div_round_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_div_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_div_round_ss(W, U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                          (int)(R)); })
 
-#define _mm_maskz_div_round_ss(__U, __A, __B, __R) __extension__ ({ \
-  (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
-                (__v4sf)  _mm_setzero_ps(), (__mmask8) __U,__R); })
+#define _mm_maskz_div_round_ss(U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)(U), (int)(R)); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_divsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df) __W,
                 (__mmask8) __U,
@@ -1419,24 +2384,36 @@
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
+  return (__m128d) __builtin_ia32_divsd_round_mask ((__v2df) __A,
                 (__v2df) __B,
                 (__v2df)  _mm_setzero_pd (),
                 (__mmask8) __U,
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm_div_round_sd(__A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+#define _mm_div_round_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
 
-#define _mm_mask_div_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  __W, (__mmask8) __U,__R); })
+#define _mm_mask_div_round_sd(W, U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)(__m128d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
 
-#define _mm_maskz_div_round_sd(__U, __A, __B, __R) __extension__ ({ \
-  (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
-                (__v2df)  _mm_setzero_pd(), (__mmask8) __U,__R); })
+#define _mm_maskz_div_round_sd(U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_div_pd(__m512d __a, __m512d __b)
+{
+  return (__m512d)((__v8df)__a/(__v8df)__b);
+}
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -1457,6 +2434,12 @@
              _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_div_ps(__m512 __a, __m512 __b)
+{
+  return (__m512)((__v16sf)__a/(__v16sf)__b);
+}
+
 static __inline__ __m512 __DEFAULT_FN_ATTRS
 _mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
   return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
@@ -1476,108 +2459,186 @@
             _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_div_round_pd(__A, __B, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B,\
-             (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+#define _mm512_div_round_pd(A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)-1, (int)(R)); })
 
-#define _mm512_mask_div_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
-  (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B, \
-             (__v8df) __W, (__mmask8) __U, __R); })
+#define _mm512_mask_div_round_pd(W, U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                        (int)(R)); })
 
-#define _mm512_maskz_div_round_pd(__U, __A, __B, __R) __extension__ ({ \
-   (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B, \
-             (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+#define _mm512_maskz_div_round_pd(U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(B), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)(U), (int)(R)); })
 
-#define _mm512_div_round_ps(__A, __B, __R) __extension__ ({ \
-  (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
-            (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+#define _mm512_div_round_ps(A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)-1, (int)(R)); })
 
-#define _mm512_mask_div_round_ps(__W, __U, __A, __B, __R)  __extension__ ({ \
-  (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
-            (__v16sf) __W, (__mmask16) __U, __R); });
+#define _mm512_mask_div_round_ps(W, U, A, B, R)  __extension__ ({ \
+  (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                       (int)(R)); })
 
-#define _mm512_maskz_div_round_ps(__U, __A, __B, __R)  __extension__ ({ \
-  (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
-            (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R);});
+#define _mm512_maskz_div_round_ps(U, A, B, R)  __extension__ ({ \
+  (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(B), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)(U), (int)(R)); })
 
 #define _mm512_roundscale_ps(A, B) __extension__ ({ \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(A), (B), (__v16sf)(A), \
-                                         -1, _MM_FROUND_CUR_DIRECTION); })
+  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
+                                         (__v16sf)(__m512)(A), (__mmask16)-1, \
+                                         _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_roundscale_ps(A, B, C, imm) __extension__ ({\
+  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
+                                         (__v16sf)(__m512)(A), (__mmask16)(B), \
+                                         _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_roundscale_ps(A, B, imm) __extension__ ({\
+  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
+                                         (__v16sf)_mm512_setzero_ps(), \
+                                         (__mmask16)(A), \
+                                         _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) __extension__ ({ \
+  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
+                                         (__v16sf)(__m512)(A), (__mmask16)(B), \
+                                         (int)(R)); })
+
+#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) __extension__ ({ \
+  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
+                                         (__v16sf)_mm512_setzero_ps(), \
+                                         (__mmask16)(A), (int)(R)); })
+
+#define _mm512_roundscale_round_ps(A, imm, R) __extension__ ({ \
+  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                         (__v16sf)_mm512_undefined_ps(), \
+                                         (__mmask16)-1, (int)(R)); })
 
 #define _mm512_roundscale_pd(A, B) __extension__ ({ \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(A), (B), (__v8df)(A), \
-                                          -1, _MM_FROUND_CUR_DIRECTION); })
+  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
+                                          (__v8df)(__m512d)(A), (__mmask8)-1, \
+                                          _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_roundscale_pd(A, B, C, imm) __extension__ ({\
+  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
+                                          (__v8df)(__m512d)(A), (__mmask8)(B), \
+                                          _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_roundscale_pd(A, B, imm) __extension__ ({\
+  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
+                                          (__v8df)_mm512_setzero_pd(), \
+                                          (__mmask8)(A), \
+                                          _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
+                                          (__v8df)(__m512d)(A), (__mmask8)(B), \
+                                          (int)(R)); })
+
+#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
+                                          (__v8df)_mm512_setzero_pd(), \
+                                          (__mmask8)(A), (int)(R)); })
+
+#define _mm512_roundscale_round_pd(A, imm, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
+                                          (__v8df)_mm512_undefined_pd(), \
+                                          (__mmask8)-1, (int)(R)); })
 
 #define _mm512_fmadd_round_pd(A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
-                                             (__v8df) (B), (__v8df) (C), \
-                                             (__mmask8) -1, (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), \
+                                           (__v8df)(__m512d)(C), (__mmask8)-1, \
+                                           (int)(R)); })
 
 
 #define _mm512_mask_fmadd_round_pd(A, U, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
-                                             (__v8df) (B), (__v8df) (C), \
-                                             (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), \
+                                           (__v8df)(__m512d)(C), \
+                                           (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) (A), \
-                                              (__v8df) (B), (__v8df) (C), \
-                                              (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) (A), \
-                                              (__v8df) (B), (__v8df) (C), \
-                                              (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_fmsub_round_pd(A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
-                                             (__v8df) (B), -(__v8df) (C), \
-                                             (__mmask8) -1, (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), \
+                                           -(__v8df)(__m512d)(C), \
+                                           (__mmask8)-1, (int)(R)); })
 
 
 #define _mm512_mask_fmsub_round_pd(A, U, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
-                                             (__v8df) (B), -(__v8df) (C), \
-                                             (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), \
+                                           -(__v8df)(__m512d)(C), \
+                                           (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) (A), \
-                                              (__v8df) (B), -(__v8df) (C), \
-                                              (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            -(__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_fnmadd_round_pd(A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) (A), \
-                                             (__v8df) (B), (__v8df) (C), \
-                                             (__mmask8) -1, (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), \
+                                           (__v8df)(__m512d)(C), (__mmask8)-1, \
+                                           (int)(R)); })
 
 
 #define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) (A), \
-                                              (__v8df) (B), (__v8df) (C), \
-                                              (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) (A), \
-                                              (__v8df) (B), (__v8df) (C), \
-                                              (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_fnmsub_round_pd(A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) (A), \
-                                             (__v8df) (B), -(__v8df) (C), \
-                                             (__mmask8) -1, (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), \
+                                           -(__v8df)(__m512d)(C), \
+                                           (__mmask8)-1, (int)(R)); })
 
 
 #define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) (A), \
-                                              (__v8df) (B), -(__v8df) (C), \
-                                              (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            -(__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -1701,75 +2762,87 @@
 }
 
 #define _mm512_fmadd_round_ps(A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
-                                            (__v16sf) (B), (__v16sf) (C), \
-                                            (__mmask16) -1, (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), \
+                                          (__v16sf)(__m512)(C), (__mmask16)-1, \
+                                          (int)(R)); })
 
 
 #define _mm512_mask_fmadd_round_ps(A, U, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
-                                            (__v16sf) (B), (__v16sf) (C), \
-                                            (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), \
+                                          (__v16sf)(__m512)(C), \
+                                          (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) (A), \
-                                             (__v16sf) (B), (__v16sf) (C), \
-                                             (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) (A), \
-                                             (__v16sf) (B), (__v16sf) (C), \
-                                             (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_fmsub_round_ps(A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
-                                            (__v16sf) (B), -(__v16sf) (C), \
-                                            (__mmask16) -1, (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), \
+                                          -(__v16sf)(__m512)(C), \
+                                          (__mmask16)-1, (int)(R)); })
 
 
 #define _mm512_mask_fmsub_round_ps(A, U, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
-                                            (__v16sf) (B), -(__v16sf) (C), \
-                                            (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), \
+                                          -(__v16sf)(__m512)(C), \
+                                          (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) (A), \
-                                             (__v16sf) (B), -(__v16sf) (C), \
-                                             (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           -(__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_fnmadd_round_ps(A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) (A), \
-                                            (__v16sf) (B), (__v16sf) (C), \
-                                            (__mmask16) -1, (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_mask(-(__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), \
+                                          (__v16sf)(__m512)(C), (__mmask16)-1, \
+                                          (int)(R)); })
 
 
 #define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) (A), \
-                                             (__v16sf) (B), (__v16sf) (C), \
-                                             (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) (A), \
-                                             (__v16sf) (B), (__v16sf) (C), \
-                                             (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_fnmsub_round_ps(A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) (A), \
-                                            (__v16sf) (B), -(__v16sf) (C), \
-                                            (__mmask16) -1, (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_mask(-(__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), \
+                                          -(__v16sf)(__m512)(C), \
+                                          (__mmask16)-1, (int)(R)); })
 
 
 #define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) (A), \
-                                             (__v16sf) (B), -(__v16sf) (C), \
-                                             (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           -(__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)); })
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
@@ -1893,45 +2966,52 @@
 }
 
 #define _mm512_fmaddsub_round_pd(A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
-                                                (__v8df) (B), (__v8df) (C), \
-                                                (__mmask8) -1, (R)); })
+  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+                                              (__v8df)(__m512d)(B), \
+                                              (__v8df)(__m512d)(C), \
+                                              (__mmask8)-1, (int)(R)); })
 
 
 #define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
-                                                (__v8df) (B), (__v8df) (C), \
-                                                (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+                                              (__v8df)(__m512d)(B), \
+                                              (__v8df)(__m512d)(C), \
+                                              (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) (A), \
-                                                 (__v8df) (B), (__v8df) (C), \
-                                                 (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
+                                               (__v8df)(__m512d)(B), \
+                                               (__v8df)(__m512d)(C), \
+                                               (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) (A), \
-                                                 (__v8df) (B), (__v8df) (C), \
-                                                 (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+                                               (__v8df)(__m512d)(B), \
+                                               (__v8df)(__m512d)(C), \
+                                               (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_fmsubadd_round_pd(A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
-                                                (__v8df) (B), -(__v8df) (C), \
-                                                (__mmask8) -1, (R)); })
+  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+                                              (__v8df)(__m512d)(B), \
+                                              -(__v8df)(__m512d)(C), \
+                                              (__mmask8)-1, (int)(R)); })
 
 
 #define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
-                                                (__v8df) (B), -(__v8df) (C), \
-                                                (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+                                              (__v8df)(__m512d)(B), \
+                                              -(__v8df)(__m512d)(C), \
+                                              (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) (A), \
-                                                 (__v8df) (B), -(__v8df) (C), \
-                                                 (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+                                               (__v8df)(__m512d)(B), \
+                                               -(__v8df)(__m512d)(C), \
+                                               (__mmask8)(U), (int)(R)); })
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -2005,45 +3085,52 @@
 }
 
 #define _mm512_fmaddsub_round_ps(A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
-                                               (__v16sf) (B), (__v16sf) (C), \
-                                               (__mmask16) -1, (R)); })
+  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+                                             (__v16sf)(__m512)(B), \
+                                             (__v16sf)(__m512)(C), \
+                                             (__mmask16)-1, (int)(R)); })
 
 
 #define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
-                                               (__v16sf) (B), (__v16sf) (C), \
-                                               (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+                                             (__v16sf)(__m512)(B), \
+                                             (__v16sf)(__m512)(C), \
+                                             (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) (A), \
-                                                (__v16sf) (B), (__v16sf) (C), \
-                                                (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
+                                              (__v16sf)(__m512)(B), \
+                                              (__v16sf)(__m512)(C), \
+                                              (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) (A), \
-                                                (__v16sf) (B), (__v16sf) (C), \
-                                                (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+                                              (__v16sf)(__m512)(B), \
+                                              (__v16sf)(__m512)(C), \
+                                              (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_fmsubadd_round_ps(A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
-                                               (__v16sf) (B), -(__v16sf) (C), \
-                                               (__mmask16) -1, (R)); })
+  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+                                             (__v16sf)(__m512)(B), \
+                                             -(__v16sf)(__m512)(C), \
+                                             (__mmask16)-1, (int)(R)); })
 
 
 #define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
-                                               (__v16sf) (B), -(__v16sf) (C), \
-                                               (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+                                             (__v16sf)(__m512)(B), \
+                                             -(__v16sf)(__m512)(C), \
+                                             (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) (A), \
-                                                (__v16sf) (B), -(__v16sf) (C), \
-                                                (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+                                              (__v16sf)(__m512)(B), \
+                                              -(__v16sf)(__m512)(C), \
+                                              (__mmask16)(U), (int)(R)); })
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
@@ -2117,9 +3204,10 @@
 }
 
 #define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) (A), \
-                                              (__v8df) (B), (__v8df) (C), \
-                                              (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -2133,9 +3221,10 @@
 }
 
 #define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) (A), \
-                                             (__v16sf) (B), (__v16sf) (C), \
-                                             (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)); })
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
@@ -2149,9 +3238,10 @@
 }
 
 #define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) (A), \
-                                                 (__v8df) (B), (__v8df) (C), \
-                                                 (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
+                                               (__v8df)(__m512d)(B), \
+                                               (__v8df)(__m512d)(C), \
+                                               (__mmask8)(U), (int)(R)); })
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -2165,9 +3255,10 @@
 }
 
 #define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) (A), \
-                                                (__v16sf) (B), (__v16sf) (C), \
-                                                (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
+                                              (__v16sf)(__m512)(B), \
+                                              (__v16sf)(__m512)(C), \
+                                              (__mmask16)(U), (int)(R)); })
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
@@ -2181,9 +3272,10 @@
 }
 
 #define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) (A), \
-                                              (__v8df) (B), (__v8df) (C), \
-                                              (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfnmaddpd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -2197,9 +3289,10 @@
 }
 
 #define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) (A), \
-                                             (__v16sf) (B), (__v16sf) (C), \
-                                             (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfnmaddps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)); })
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
@@ -2213,15 +3306,17 @@
 }
 
 #define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) (A), \
-                                              (__v8df) (B), (__v8df) (C), \
-                                              (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfnmsubpd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)); })
 
 
 #define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) __extension__ ({ \
-  (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) (A), \
-                                               (__v8df) (B), (__v8df) (C), \
-                                               (__mmask8) (U), (R)); })
+  (__m512d)__builtin_ia32_vfnmsubpd512_mask3((__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8df)(__m512d)(C), \
+                                             (__mmask8)(U), (int)(R)); })
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -2245,15 +3340,17 @@
 }
 
 #define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) (A), \
-                                             (__v16sf) (B), (__v16sf) (C), \
-                                             (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfnmsubps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)); })
 
 
 #define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) __extension__ ({ \
-  (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) (A), \
-                                              (__v16sf) (B), (__v16sf) (C), \
-                                              (__mmask16) (U), (R)); })
+  (__m512)__builtin_ia32_vfnmsubps512_mask3((__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16sf)(__m512)(C), \
+                                            (__mmask16)(U), (int)(R)); })
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS
@@ -2289,6 +3386,29 @@
                                                        (__v16si) __B,
                                                        (__mmask16) -1);
 }
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_epi32 (__m512i __A, __mmask16 __U,
+                                __m512i __I, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I
+                                                        /* idx */ ,
+                                                        (__v16si) __A,
+                                                        (__v16si) __B,
+                                                        (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_epi32 (__mmask16 __U, __m512i __A,
+                                 __m512i __I, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermt2vard512_maskz ((__v16si) __I
+                                                        /* idx */ ,
+                                                        (__v16si) __A,
+                                                        (__v16si) __B,
+                                                        (__mmask16) __U);
+}
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
 {
@@ -2299,98 +3419,140 @@
                                                        (__mmask8) -1);
 }
 
-static __inline __m512d __DEFAULT_FN_ATTRS
-_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_epi64 (__m512i __A, __mmask8 __U, __m512i __I,
+                                __m512i __B)
 {
-  return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
-                                                        /* idx */ ,
-                                                        (__v8df) __A,
-                                                        (__v8df) __B,
-                                                        (__mmask8) -1);
-}
-static __inline __m512 __DEFAULT_FN_ATTRS
-_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
-{
-  return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
+  return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I
                                                        /* idx */ ,
-                                                       (__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       (__mmask16) -1);
+                                                       (__v8di) __A,
+                                                       (__v8di) __B,
+                                                       (__mmask8) __U);
+}
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_epi64 (__mmask8 __U, __m512i __A,
+         __m512i __I, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermt2varq512_maskz ((__v8di) __I
+                                                        /* idx */ ,
+                                                        (__v8di) __A,
+                                                        (__v8di) __B,
+                                                        (__mmask8) __U);
 }
 
 #define _mm512_alignr_epi64(A, B, I) __extension__ ({ \
   (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
-                                         (__v8di)(__m512i)(B), \
-                                         (I), (__v8di)_mm512_setzero_si512(), \
+                                         (__v8di)(__m512i)(B), (int)(I), \
+                                         (__v8di)_mm512_setzero_si512(), \
                                          (__mmask8)-1); })
 
+#define _mm512_mask_alignr_epi64(W, U, A, B, imm) __extension__({\
+  (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
+                                         (__v8di)(__m512i)(B), (int)(imm), \
+                                         (__v8di)(__m512i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm512_maskz_alignr_epi64(U, A, B, imm) __extension__({\
+  (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
+                                         (__v8di)(__m512i)(B), (int)(imm), \
+                                         (__v8di)_mm512_setzero_si512(), \
+                                         (__mmask8)(U)); })
+
 #define _mm512_alignr_epi32(A, B, I) __extension__ ({ \
   (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
-                                         (__v16si)(__m512i)(B), \
-                                         (I), (__v16si)_mm512_setzero_si512(), \
+                                         (__v16si)(__m512i)(B), (int)(I), \
+                                         (__v16si)_mm512_setzero_si512(), \
                                          (__mmask16)-1); })
 
+#define _mm512_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({\
+  (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
+                                         (__v16si)(__m512i)(B), (int)(imm), \
+                                         (__v16si)(__m512i)(W), \
+                                         (__mmask16)(U)); })
+
+#define _mm512_maskz_alignr_epi32(U, A, B, imm) __extension__({\
+  (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
+                                         (__v16si)(__m512i)(B), (int)(imm), \
+                                         (__v16si)_mm512_setzero_si512(), \
+                                         (__mmask16)(U)); })
 /* Vector Extract */
 
 #define _mm512_extractf64x4_pd(A, I) __extension__ ({                    \
-      (__m256d)                                                          \
-        __builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A),           \
-                                         (I),                            \
-                                         (__v4df)_mm256_setzero_si256(), \
-                                         (__mmask8) -1); })
+  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
+                                            (__v4df)_mm256_setzero_si256(), \
+                                            (__mmask8)-1); })
+
+#define _mm512_mask_extractf64x4_pd(W, U, A, imm) __extension__ ({\
+  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+                                            (__v4df)(__m256d)(W), \
+                                            (__mmask8)(U)); })
+
+#define _mm512_maskz_extractf64x4_pd(U, A, imm) __extension__ ({\
+  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+                                            (__v4df)_mm256_setzero_pd(), \
+                                            (__mmask8)(U)); })
 
 #define _mm512_extractf32x4_ps(A, I) __extension__ ({                    \
-      (__m128)                                                           \
-        __builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A),           \
-                                         (I),                            \
-                                         (__v4sf)_mm_setzero_ps(),       \
-                                         (__mmask8) -1); })
+  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)-1); })
 
+#define _mm512_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({\
+  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                           (__v4sf)(__m128)(W), \
+                                           (__mmask8)(U)); })
+
+#define _mm512_maskz_extractf32x4_ps(U, A, imm) __extension__ ({\
+  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)(U)); })
 /* Vector Blend */
 
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
 {
-  return (__m512d) __builtin_ia32_blendmpd_512_mask ((__v8df) __A,
+  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
                  (__v8df) __W,
-                 (__mmask8) __U);
+                 (__v8df) __A);
 }
 
 static __inline __m512 __DEFAULT_FN_ATTRS
 _mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
 {
-  return (__m512) __builtin_ia32_blendmps_512_mask ((__v16sf) __A,
+  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
                 (__v16sf) __W,
-                (__mmask16) __U);
+                (__v16sf) __A);
 }
 
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
 {
-  return (__m512i) __builtin_ia32_blendmq_512_mask ((__v8di) __A,
+  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
                 (__v8di) __W,
-                (__mmask8) __U);
+                (__v8di) __A);
 }
 
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
 {
-  return (__m512i) __builtin_ia32_blendmd_512_mask ((__v16si) __A,
+  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
                 (__v16si) __W,
-                (__mmask16) __U);
+                (__v16si) __A);
 }
 
 /* Compare */
 
 #define _mm512_cmp_round_ps_mask(A, B, P, R) __extension__ ({ \
   (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (P), (__mmask16)-1, (R)); })
+                                          (__v16sf)(__m512)(B), (int)(P), \
+                                          (__mmask16)-1, (int)(R)); })
 
 #define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) __extension__ ({ \
   (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (P), (__mmask16)(U), (R)); })
+                                          (__v16sf)(__m512)(B), (int)(P), \
+                                          (__mmask16)(U), (int)(R)); })
 
 #define _mm512_cmp_ps_mask(A, B, P) \
   _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
@@ -2400,13 +3562,13 @@
 
 #define _mm512_cmp_round_pd_mask(A, B, P, R) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(B), \
-                                         (P), (__mmask8)-1, (R)); })
+                                         (__v8df)(__m512d)(B), (int)(P), \
+                                         (__mmask8)-1, (int)(R)); })
 
 #define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(B), \
-                                         (P), (__mmask8)(U), (R)); })
+                                         (__v8df)(__m512d)(B), (int)(P), \
+                                         (__mmask8)(U), (int)(R)); })
 
 #define _mm512_cmp_pd_mask(A, B, P) \
   _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
@@ -2416,6 +3578,22 @@
 
 /* Conversion */
 
+#define _mm512_cvtt_roundps_epu32(A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+                                             (__v16si)_mm512_undefined_epi32(), \
+                                             (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+                                             (__v16si)(__m512i)(W), \
+                                             (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+                                             (__v16si)_mm512_setzero_si512(), \
+                                             (__mmask16)(U), (int)(R)); })
+
+
 static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_cvttps_epu32(__m512 __A)
 {
@@ -2426,15 +3604,80 @@
                   _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
+                   (__v16si) __W,
+                   (__mmask16) __U,
+                   _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
+                   (__v16si) _mm512_setzero_si512 (),
+                   (__mmask16) __U,
+                   _MM_FROUND_CUR_DIRECTION);
+}
+
 #define _mm512_cvt_roundepi32_ps(A, R) __extension__ ({ \
-  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(A), \
+  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
                                           (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)-1, (R)); })
+                                          (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+                                          (__v16sf)(__m512)(W), \
+                                          (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(U), (int)(R)); })
 
 #define _mm512_cvt_roundepu32_ps(A, R) __extension__ ({ \
-  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(A), \
+  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
                                            (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)-1, (R)); })
+                                           (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+                                           (__v16sf)(__m512)(W), \
+                                           (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_cvtepu32_ps (__m512i __A)
+{
+  return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
+                 (__v16sf) _mm512_undefined_ps (),
+                 (__mmask16) -1,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A)
+{
+  return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
+                 (__v16sf) __W,
+                 (__mmask16) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
+{
+  return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
+                 (__v16sf) _mm512_setzero_ps (),
+                 (__mmask16) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
 
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_cvtepi32_pd(__m256i __A)
@@ -2445,6 +3688,49 @@
                 (__mmask8) -1);
 }
 
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
+{
+  return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
+                (__v8df) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
+{
+  return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
+                (__v8df) _mm512_setzero_pd (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_ps (__m512i __A)
+{
+  return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
+                (__v16sf) _mm512_undefined_ps (),
+                (__mmask16) -1,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A)
+{
+  return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
+                (__v16sf) __W,
+                (__mmask16) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
+{
+  return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
+                (__v16sf) _mm512_setzero_ps (),
+                (__mmask16) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_cvtepu32_pd(__m256i __A)
 {
@@ -2454,15 +3740,109 @@
                 (__mmask8) -1);
 }
 
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
+{
+  return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
+                  (__v8df) __W,
+                  (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
+{
+  return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
+                  (__v8df) _mm512_setzero_pd (),
+                  (__mmask8) __U);
+}
+
 #define _mm512_cvt_roundpd_ps(A, R) __extension__ ({ \
-  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(A), \
+  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
                                           (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)-1, (R)); })
+                                          (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) __extension__ ({ \
+  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+                                          (__v8sf)(__m256)(W), (__mmask8)(U), \
+                                          (int)(R)); })
+
+#define _mm512_maskz_cvt_roundpd_ps(U, A, R) __extension__ ({ \
+  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+                                          (__v8sf)_mm256_setzero_ps(), \
+                                          (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_cvtpd_ps (__m512d __A)
+{
+  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
+                (__v8sf) _mm256_undefined_ps (),
+                (__mmask8) -1,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A)
+{
+  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
+                (__v8sf) __W,
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A)
+{
+  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
+                (__v8sf) _mm256_setzero_ps (),
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_ph(A, I) __extension__ ({ \
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v16hi)_mm256_undefined_si256(), \
+                                            (__mmask16)-1); })
+
+#define _mm512_mask_cvt_roundps_ph(U, W, A, I) __extension__ ({ \
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v16hi)(__m256i)(U), \
+                                            (__mmask16)(W)); })
+
+#define _mm512_maskz_cvt_roundps_ph(W, A, I) __extension__ ({ \
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v16hi)_mm256_setzero_si256(), \
+                                            (__mmask16)(W)); })
 
 #define _mm512_cvtps_ph(A, I) __extension__ ({ \
-  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(A), (I), \
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
                                             (__v16hi)_mm256_setzero_si256(), \
-                                            -1); })
+                                            (__mmask16)-1); })
+
+#define _mm512_mask_cvtps_ph(U, W, A, I) __extension__ ({ \
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v16hi)(__m256i)(U), \
+                                            (__mmask16)(W)); })
+
+#define _mm512_maskz_cvtps_ph(W, A, I) __extension__ ({\
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v16hi)_mm256_setzero_si256(), \
+                                            (__mmask16)(W)); })
+
+#define _mm512_cvt_roundph_ps(A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+                                           (__v16sf)_mm512_undefined_ps(), \
+                                           (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundph_ps(W, U, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+                                           (__v16sf)(__m512)(W), \
+                                           (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundph_ps(U, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)(U), (int)(R)); })
+
 
 static  __inline __m512 __DEFAULT_FN_ATTRS
 _mm512_cvtph_ps(__m256i __A)
@@ -2474,15 +3854,39 @@
                 _MM_FROUND_CUR_DIRECTION);
 }
 
-static __inline __m512i __DEFAULT_FN_ATTRS
-_mm512_cvttps_epi32(__m512 __a)
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
 {
-  return (__m512i)
-    __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
-                                     (__v16si) _mm512_setzero_si512 (),
-                                     (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
+  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
+                 (__v16sf) __W,
+                 (__mmask16) __U,
+                 _MM_FROUND_CUR_DIRECTION);
 }
 
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
+{
+  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
+                 (__v16sf) _mm512_setzero_ps (),
+                 (__mmask16) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8si)_mm256_setzero_si256(), \
+                                            (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8si)(__m256i)(W), \
+                                            (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8si)_mm256_setzero_si256(), \
+                                            (__mmask8)(U), (int)(R)); })
+
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm512_cvttpd_epi32(__m512d __a)
 {
@@ -2492,67 +3896,437 @@
                                                     _MM_FROUND_CUR_DIRECTION);
 }
 
-#define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \
-  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)-1, (R)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
+                  (__v8si) __W,
+                  (__mmask8) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
+                  (__v8si) _mm256_setzero_si256 (),
+                  (__mmask8) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
 
 #define _mm512_cvtt_roundps_epi32(A, R) __extension__ ({ \
-  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(A), \
+  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
                                             (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)-1, (R)); })
+                                            (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+                                            (__v16si)(__m512i)(W), \
+                                            (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+                                            (__v16si)_mm512_setzero_si512(), \
+                                            (__mmask16)(U), (int)(R)); })
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epi32(__m512 __a)
+{
+  return (__m512i)
+    __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
+                                     (__v16si) _mm512_setzero_si512 (),
+                                     (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
+                  (__v16si) __W,
+                  (__mmask16) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
+                  (__v16si) _mm512_setzero_si512 (),
+                  (__mmask16) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
 
 #define _mm512_cvt_roundps_epi32(A, R) __extension__ ({ \
-  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(A), \
+  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
                                            (__v16si)_mm512_setzero_si512(), \
-                                           (__mmask16)-1, (R)); })
+                                           (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+                                           (__v16si)(__m512i)(W), \
+                                           (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundps_epi32(U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+                                           (__v16si)_mm512_setzero_si512(), \
+                                           (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epi32 (__m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
+                 (__v16si) _mm512_undefined_epi32 (),
+                 (__mmask16) -1,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
+                 (__v16si) __W,
+                 (__mmask16) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
+                 (__v16si)
+                 _mm512_setzero_si512 (),
+                 (__mmask16) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
 
 #define _mm512_cvt_roundpd_epi32(A, R) __extension__ ({ \
-  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(A), \
+  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
                                            (__v8si)_mm256_setzero_si256(), \
-                                           (__mmask8)-1, (R)); })
+                                           (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+                                           (__v8si)(__m256i)(W), \
+                                           (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+                                           (__v8si)_mm256_setzero_si256(), \
+                                           (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epi32 (__m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
+                 (__v8si)
+                 _mm256_undefined_si256 (),
+                 (__mmask8) -1,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
+                 (__v8si) __W,
+                 (__mmask8) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
+                 (__v8si)
+                 _mm256_setzero_si256 (),
+                 (__mmask8) __U,
+                 _MM_FROUND_CUR_DIRECTION);
+}
 
 #define _mm512_cvt_roundps_epu32(A, R) __extension__ ({ \
-  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(A), \
+  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
                                             (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)-1, (R)); })
+                                            (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+                                            (__v16si)(__m512i)(W), \
+                                            (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundps_epu32(U, A, R) __extension__ ({ \
+  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+                                            (__v16si)_mm512_setzero_si512(), \
+                                            (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epu32 (__m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
+                  (__v16si)
+                  _mm512_undefined_epi32 (),
+                  (__mmask16) -1,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
+                  (__v16si) __W,
+                  (__mmask16) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epu32 (__mmask16 __U, __m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
+                  (__v16si)
+                  _mm512_setzero_si512 (),
+                  (__mmask16) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
 
 #define _mm512_cvt_roundpd_epu32(A, R) __extension__ ({ \
-  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(A), \
+  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
                                             (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8) -1, (R)); })
+                                            (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8si)(W), \
+                                            (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8si)_mm256_setzero_si256(), \
+                                            (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epu32 (__m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
+                  (__v8si)
+                  _mm256_undefined_si256 (),
+                  (__mmask8) -1,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
+                  (__v8si) __W,
+                  (__mmask8) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
+                  (__v8si)
+                  _mm256_setzero_si256 (),
+                  (__mmask8) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
 
 /* Unpack and Interleave */
+
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_unpackhi_pd(__m512d __a, __m512d __b)
 {
-  return __builtin_shufflevector(__a, __b, 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
+  return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
+                                          1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+                                           (__v8df)_mm512_unpackhi_pd(__A, __B),
+                                           (__v8df)__W);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+                                           (__v8df)_mm512_unpackhi_pd(__A, __B),
+                                           (__v8df)_mm512_setzero_pd());
 }
 
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_unpacklo_pd(__m512d __a, __m512d __b)
 {
-  return __builtin_shufflevector(__a, __b, 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
+  return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
+                                          0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+                                           (__v8df)_mm512_unpacklo_pd(__A, __B),
+                                           (__v8df)__W);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+                                           (__v8df)_mm512_unpacklo_pd(__A, __B),
+                                           (__v8df)_mm512_setzero_pd());
 }
 
 static __inline __m512 __DEFAULT_FN_ATTRS
 _mm512_unpackhi_ps(__m512 __a, __m512 __b)
 {
-  return __builtin_shufflevector(__a, __b,
-                                 2,    18,    3,    19,
-                                 2+4,  18+4,  3+4,  19+4,
-                                 2+8,  18+8,  3+8,  19+8,
-                                 2+12, 18+12, 3+12, 19+12);
+  return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
+                                         2,    18,    3,    19,
+                                         2+4,  18+4,  3+4,  19+4,
+                                         2+8,  18+8,  3+8,  19+8,
+                                         2+12, 18+12, 3+12, 19+12);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
+                                          (__v16sf)_mm512_unpackhi_ps(__A, __B),
+                                          (__v16sf)__W);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
+                                          (__v16sf)_mm512_unpackhi_ps(__A, __B),
+                                          (__v16sf)_mm512_setzero_ps());
 }
 
 static __inline __m512 __DEFAULT_FN_ATTRS
 _mm512_unpacklo_ps(__m512 __a, __m512 __b)
 {
-  return __builtin_shufflevector(__a, __b,
-                                 0,    16,    1,    17,
-                                 0+4,  16+4,  1+4,  17+4,
-                                 0+8,  16+8,  1+8,  17+8,
-                                 0+12, 16+12, 1+12, 17+12);
+  return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
+                                         0,    16,    1,    17,
+                                         0+4,  16+4,  1+4,  17+4,
+                                         0+8,  16+8,  1+8,  17+8,
+                                         0+12, 16+12, 1+12, 17+12);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
+                                          (__v16sf)_mm512_unpacklo_ps(__A, __B),
+                                          (__v16sf)__W);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
+                                          (__v16sf)_mm512_unpacklo_ps(__A, __B),
+                                          (__v16sf)_mm512_setzero_ps());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi32(__m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
+                                          2,    18,    3,    19,
+                                          2+4,  18+4,  3+4,  19+4,
+                                          2+8,  18+8,  3+8,  19+8,
+                                          2+12, 18+12, 3+12, 19+12);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
+                                       (__v16si)_mm512_unpackhi_epi32(__A, __B),
+                                       (__v16si)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
+                                       (__v16si)_mm512_unpackhi_epi32(__A, __B),
+                                       (__v16si)_mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi32(__m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
+                                          0,    16,    1,    17,
+                                          0+4,  16+4,  1+4,  17+4,
+                                          0+8,  16+8,  1+8,  17+8,
+                                          0+12, 16+12, 1+12, 17+12);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
+                                       (__v16si)_mm512_unpacklo_epi32(__A, __B),
+                                       (__v16si)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
+                                       (__v16si)_mm512_unpacklo_epi32(__A, __B),
+                                       (__v16si)_mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi64(__m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
+                                          1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
+                                        (__v8di)_mm512_unpackhi_epi64(__A, __B),
+                                        (__v8di)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
+                                        (__v8di)_mm512_unpackhi_epi64(__A, __B),
+                                        (__v8di)_mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi64 (__m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
+                                          0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
+                                        (__v8di)_mm512_unpacklo_epi64(__A, __B),
+                                        (__v8di)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
+                                        (__v8di)_mm512_unpacklo_epi64(__A, __B),
+                                        (__v8di)_mm512_setzero_si512());
 }
 
 /* Bit Test */
@@ -2565,6 +4339,13 @@
             (__mmask16) -1);
 }
 
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A,
+                 (__v16si) __B, __U);
+}
+
 static __inline __mmask8 __DEFAULT_FN_ATTRS
 _mm512_test_epi64_mask(__m512i __A, __m512i __B)
 {
@@ -2573,57 +4354,88 @@
                  (__mmask8) -1);
 }
 
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A, (__v8di) __B, __U);
+}
+
+
 /* SIMD load ops */
 
 static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_loadu_si512 (void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
+                  (__v16si)
+                  _mm512_setzero_si512 (),
+                  (__mmask16) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
+                  (__v16si) __W,
+                  (__mmask16) __U);
+}
+
+
+static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
 {
-  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const __v16si *)__P,
+  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P,
                                                      (__v16si)
                                                      _mm512_setzero_si512 (),
                                                      (__mmask16) __U);
 }
 
 static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P,
+                  (__v8di) __W,
+                  (__mmask8) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
 {
-  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const __v8di *)__P,
+  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *)__P,
                                                      (__v8di)
                                                      _mm512_setzero_si512 (),
                                                      (__mmask8) __U);
 }
 
 static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P)
+{
+  return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P,
+                   (__v16sf) __W,
+                   (__mmask16) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
 _mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
 {
-  return (__m512) __builtin_ia32_loadups512_mask ((const __v16sf *)__P,
+  return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P,
                                                   (__v16sf)
                                                   _mm512_setzero_ps (),
                                                   (__mmask16) __U);
 }
 
 static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P)
+{
+  return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P,
+                (__v8df) __W,
+                (__mmask8) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
 {
-  return (__m512d) __builtin_ia32_loadupd512_mask ((const __v8df *)__P,
-                                                   (__v8df)
-                                                   _mm512_setzero_pd (),
-                                                   (__mmask8) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS
-_mm512_maskz_load_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
-                                                  (__v16sf)
-                                                  _mm512_setzero_ps (),
-                                                  (__mmask16) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS
-_mm512_maskz_load_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
+  return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P,
                                                    (__v8df)
                                                    _mm512_setzero_pd (),
                                                    (__mmask8) __U);
@@ -2648,7 +4460,7 @@
 }
 
 static __inline __m512 __DEFAULT_FN_ATTRS
-_mm512_load_ps(double const *__p)
+_mm512_load_ps(float const *__p)
 {
   return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__p,
                                                   (__v16sf)
@@ -2656,8 +4468,25 @@
                                                   (__mmask16) -1);
 }
 
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P)
+{
+  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P,
+                   (__v16sf) __W,
+                   (__mmask16) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_load_ps(__mmask16 __U, void const *__P)
+{
+  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
+                                                  (__v16sf)
+                                                  _mm512_setzero_ps (),
+                                                  (__mmask16) __U);
+}
+
 static __inline __m512d __DEFAULT_FN_ATTRS
-_mm512_load_pd(float const *__p)
+_mm512_load_pd(double const *__p)
 {
   return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__p,
                                                    (__v8df)
@@ -2665,45 +4494,87 @@
                                                    (__mmask8) -1);
 }
 
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P)
+{
+  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P,
+                          (__v8df) __W,
+                          (__mmask8) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_load_pd(__mmask8 __U, void const *__P)
+{
+  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
+                                                   (__v8df)
+                                                   _mm512_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_load_si512 (void const *__P)
+{
+  return *(__m512i *) __P;
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_load_epi32 (void const *__P)
+{
+  return *(__m512i *) __P;
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_load_epi64 (void const *__P)
+{
+  return *(__m512i *) __P;
+}
+
 /* SIMD store ops */
 
 static __inline void __DEFAULT_FN_ATTRS
 _mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
 {
-  __builtin_ia32_storedqudi512_mask ((__v8di *)__P, (__v8di) __A,
+  __builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A,
                                      (__mmask8) __U);
 }
 
 static __inline void __DEFAULT_FN_ATTRS
+_mm512_storeu_si512 (void *__P, __m512i __A)
+{
+  __builtin_ia32_storedqusi512_mask ((int *) __P, (__v16si) __A,
+            (__mmask16) -1);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
 _mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
 {
-  __builtin_ia32_storedqusi512_mask ((__v16si *)__P, (__v16si) __A,
+  __builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A,
                                      (__mmask16) __U);
 }
 
 static __inline void __DEFAULT_FN_ATTRS
 _mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
 {
-  __builtin_ia32_storeupd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
+  __builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U);
 }
 
 static __inline void __DEFAULT_FN_ATTRS
 _mm512_storeu_pd(void *__P, __m512d __A)
 {
-  __builtin_ia32_storeupd512_mask((__v8df *)__P, (__v8df)__A, (__mmask8)-1);
+  __builtin_ia32_storeupd512_mask((double *)__P, (__v8df)__A, (__mmask8)-1);
 }
 
 static __inline void __DEFAULT_FN_ATTRS
 _mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
 {
-  __builtin_ia32_storeups512_mask ((__v16sf *)__P, (__v16sf) __A,
+  __builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A,
                                    (__mmask16) __U);
 }
 
 static __inline void __DEFAULT_FN_ATTRS
 _mm512_storeu_ps(void *__P, __m512 __A)
 {
-  __builtin_ia32_storeups512_mask((__v16sf *)__P, (__v16sf)__A, (__mmask16)-1);
+  __builtin_ia32_storeups512_mask((float *)__P, (__v16sf)__A, (__mmask16)-1);
 }
 
 static __inline void __DEFAULT_FN_ATTRS
@@ -2731,6 +4602,24 @@
   *(__m512*)__P = __A;
 }
 
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_si512 (void *__P, __m512i __A)
+{
+  *(__m512i *) __P = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_epi32 (void *__P, __m512i __A)
+{
+  *(__m512i *) __P = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_epi64 (void *__P, __m512i __A)
+{
+  *(__m512i *) __P = __A;
+}
+
 /* Mask ops */
 
 static __inline __mmask16 __DEFAULT_FN_ATTRS
@@ -3029,46 +4918,4626 @@
                                                 __u);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi8_epi32 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
+                (__v16si)
+                _mm512_setzero_si512 (),
+                (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi8_epi32 (__m512i __W, __mmask16 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
+                (__v16si) __W,
+                (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi8_epi32 (__mmask16 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
+                (__v16si)
+                _mm512_setzero_si512 (),
+                (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi8_epi64 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi8_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
+                (__v8di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_epi64 (__m256i __X)
+{
+  return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_epi64 (__m512i __W, __mmask8 __U, __m256i __X)
+{
+  return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
+                (__v8di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi32_epi64 (__mmask8 __U, __m256i __X)
+{
+  return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi16_epi32 (__m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
+                (__v16si)
+                _mm512_setzero_si512 (),
+                (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi16_epi32 (__m512i __W, __mmask16 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
+                (__v16si) __W,
+                (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi16_epi32 (__mmask16 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
+                (__v16si)
+                _mm512_setzero_si512 (),
+                (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi16_epi64 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi16_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
+                (__v8di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu8_epi32 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
+                (__v16si)
+                _mm512_setzero_si512 (),
+                (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu8_epi32 (__m512i __W, __mmask16 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
+                (__v16si) __W,
+                (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu8_epi32 (__mmask16 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
+                (__v16si)
+                _mm512_setzero_si512 (),
+                (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu8_epi64 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu8_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
+                (__v8di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu32_epi64 (__m256i __X)
+{
+  return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu32_epi64 (__m512i __W, __mmask8 __U, __m256i __X)
+{
+  return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
+                (__v8di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu32_epi64 (__mmask8 __U, __m256i __X)
+{
+  return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu16_epi32 (__m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
+                (__v16si)
+                _mm512_setzero_si512 (),
+                (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu16_epi32 (__m512i __W, __mmask16 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
+                (__v16si) __W,
+                (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu16_epi32 (__mmask16 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
+                (__v16si)
+                _mm512_setzero_si512 (),
+                (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu16_epi64 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu16_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
+                (__v8di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
+                (__v8di)
+                _mm512_setzero_si512 (),
+                (__mmask8) __U);
+}
+
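+/* Variable rotates: rotate each 32/64-bit element of __A right by the
+ * corresponding per-element count in __B. */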
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_rorv_epi32 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
+              (__v16si) __B,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
+              (__v16si) __B,
+              (__v16si) __W,
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
+              (__v16si) __B,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_rorv_epi64 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
+              (__v8di) __B,
+              (__v8di)
+              _mm512_setzero_si512 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
+              (__v8di) __B,
+              (__v8di) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
+              (__v8di) __B,
+              (__v8di)
+              _mm512_setzero_si512 (),
+              (__mmask8) __U);
+}
+
+
+
 #define _mm512_cmp_epi32_mask(a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                         (__v16si)(__m512i)(b), (p), \
+                                         (__v16si)(__m512i)(b), (int)(p), \
                                          (__mmask16)-1); })
 
 #define _mm512_cmp_epu32_mask(a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (p), \
+                                          (__v16si)(__m512i)(b), (int)(p), \
                                           (__mmask16)-1); })
 
 #define _mm512_cmp_epi64_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                        (__v8di)(__m512i)(b), (p), \
+                                        (__v8di)(__m512i)(b), (int)(p), \
                                         (__mmask8)-1); })
 
 #define _mm512_cmp_epu64_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (p), \
+                                         (__v8di)(__m512i)(b), (int)(p), \
                                          (__mmask8)-1); })
 
 #define _mm512_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                         (__v16si)(__m512i)(b), (p), \
+                                         (__v16si)(__m512i)(b), (int)(p), \
                                          (__mmask16)(m)); })
 
 #define _mm512_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (p), \
+                                          (__v16si)(__m512i)(b), (int)(p), \
                                           (__mmask16)(m)); })
 
 #define _mm512_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                        (__v8di)(__m512i)(b), (p), \
+                                        (__v8di)(__m512i)(b), (int)(p), \
                                         (__mmask8)(m)); })
 
 #define _mm512_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (p), \
+                                         (__v8di)(__m512i)(b), (int)(p), \
                                          (__mmask8)(m)); })
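+/* Rotates by an immediate count (rol/ror) and rotate-left by per-element
+ * counts (rolv); rotation counts are interpreted modulo the element width. */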
 
+#define _mm512_rol_epi32(a, b) __extension__ ({ \
+  (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
+                                        (__v16si)_mm512_setzero_si512(), \
+                                        (__mmask16)-1); })
+
+#define _mm512_mask_rol_epi32(W, U, a, b) __extension__ ({ \
+  (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
+                                        (__v16si)(__m512i)(W), \
+                                        (__mmask16)(U)); })
+
+#define _mm512_maskz_rol_epi32(U, a, b) __extension__ ({ \
+  (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
+                                        (__v16si)_mm512_setzero_si512(), \
+                                        (__mmask16)(U)); })
+
+#define _mm512_rol_epi64(a, b) __extension__ ({ \
+  (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
+                                        (__v8di)_mm512_setzero_si512(), \
+                                        (__mmask8)-1); })
+
+#define _mm512_mask_rol_epi64(W, U, a, b) __extension__ ({ \
+  (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
+                                        (__v8di)(__m512i)(W), (__mmask8)(U)); })
+
+#define _mm512_maskz_rol_epi64(U, a, b) __extension__ ({ \
+  (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
+                                        (__v8di)_mm512_setzero_si512(), \
+                                        (__mmask8)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_rolv_epi32 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
+              (__v16si) __B,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
+              (__v16si) __B,
+              (__v16si) __W,
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
+              (__v16si) __B,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_rolv_epi64 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
+              (__v8di) __B,
+              (__v8di)
+              _mm512_setzero_si512 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
+              (__v8di) __B,
+              (__v8di) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
+              (__v8di) __B,
+              (__v8di)
+              _mm512_setzero_si512 (),
+              (__mmask8) __U);
+}
+
+#define _mm512_ror_epi32(A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                        (__v16si)_mm512_setzero_si512(), \
+                                        (__mmask16)-1); })
+
+#define _mm512_mask_ror_epi32(W, U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                        (__v16si)(__m512i)(W), \
+                                        (__mmask16)(U)); })
+
+#define _mm512_maskz_ror_epi32(U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                        (__v16si)_mm512_setzero_si512(), \
+                                        (__mmask16)(U)); })
+
+#define _mm512_ror_epi64(A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                        (__v8di)_mm512_setzero_si512(), \
+                                        (__mmask8)-1); })
+
+#define _mm512_mask_ror_epi64(W, U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                        (__v8di)(__m512i)(W), (__mmask8)(U)); })
+
+#define _mm512_maskz_ror_epi64(U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                        (__v8di)_mm512_setzero_si512(), \
+                                        (__mmask8)(U)); })
+
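+/* Immediate shifts: shift every 32/64-bit element left (slli) or right
+ * logically (srli) by an 8-bit immediate count. */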
+#define _mm512_slli_epi32(A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                         (__v16si)_mm512_setzero_si512(), \
+                                         (__mmask16)-1); })
+
+#define _mm512_mask_slli_epi32(W, U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                         (__v16si)(__m512i)(W), \
+                                         (__mmask16)(U)); })
+
+#define _mm512_maskz_slli_epi32(U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                         (__v16si)_mm512_setzero_si512(), \
+                                         (__mmask16)(U)); })
+
+#define _mm512_slli_epi64(A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                         (__v8di)_mm512_setzero_si512(), \
+                                         (__mmask8)-1); })
+
+#define _mm512_mask_slli_epi64(W, U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                         (__v8di)(__m512i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm512_maskz_slli_epi64(U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                         (__v8di)_mm512_setzero_si512(), \
+                                         (__mmask8)(U)); })
+
+
+
+#define _mm512_srli_epi32(A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                         (__v16si)_mm512_setzero_si512(), \
+                                         (__mmask16)-1); })
+
+#define _mm512_mask_srli_epi32(W, U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                         (__v16si)(__m512i)(W), \
+                                         (__mmask16)(U)); })
+
+#define _mm512_maskz_srli_epi32(U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                         (__v16si)_mm512_setzero_si512(), \
+                                         (__mmask16)(U)); })
+
+#define _mm512_srli_epi64(A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                         (__v8di)_mm512_setzero_si512(), \
+                                         (__mmask8)-1); })
+
+#define _mm512_mask_srli_epi64(W, U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                         (__v8di)(__m512i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm512_maskz_srli_epi64(U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                         (__v8di)_mm512_setzero_si512(), \
+                                         (__mmask8)(U)); })
+
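+/* Aligned masked loads/stores (vmovdqa32/64) and mask-driven element moves.
+ * The load/store pointers must be 64-byte aligned; mov_epi32/64 blend __A
+ * over __W (or over zero) under the write mask. */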
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
+              (__v16si) __W,
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A)
+{
+  __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A,
+          (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
+                 (__v16si) __A,
+                 (__v16si) __W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
+                 (__v16si) __A,
+                 (__v16si) _mm512_setzero_si512 ());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
+                 (__v8di) __A,
+                 (__v8di) __W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
+                 (__v8di) __A,
+                 (__v8di) _mm512_setzero_si512 ());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
+              (__v8di) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
+              (__v8di)
+              _mm512_setzero_si512 (),
+              (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A)
+{
+  __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A,
+          (__mmask8) __U);
+}
+
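+/* movedup_pd copies each even-indexed double-precision element of __A into
+ * both itself and the following odd lane. */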
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_movedup_pd (__m512d __A)
+{
+  return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A,
+                                          0, 0, 2, 2, 4, 4, 6, 6);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+                                              (__v8df)_mm512_movedup_pd(__A),
+                                              (__v8df)__W);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
+{
+  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+                                              (__v8df)_mm512_movedup_pd(__A),
+                                              (__v8df)_mm512_setzero_pd());
+}
+
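+/* fixupimm: per-element fix-up of special floating-point values (zero, NaN,
+ * infinities, denormals) using the response table held in the integer
+ * operand C; imm selects which fault conditions are reported. */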
+#define _mm512_fixupimm_round_pd(A, B, C, imm, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8di)(__m512i)(C), (int)(imm), \
+                                             (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8di)(__m512i)(C), (int)(imm), \
+                                             (__mmask8)(U), (int)(R)); })
+
+#define _mm512_fixupimm_pd(A, B, C, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8di)(__m512i)(C), (int)(imm), \
+                                             (__mmask8)-1, \
+                                             _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8di)(__m512i)(C), (int)(imm), \
+                                             (__mmask8)(U), \
+                                             _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
+                                              (__v8df)(__m512d)(B), \
+                                              (__v8di)(__m512i)(C), \
+                                              (int)(imm), (__mmask8)(U), \
+                                              (int)(R)); })
+
+#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
+                                              (__v8df)(__m512d)(B), \
+                                              (__v8di)(__m512i)(C), \
+                                              (int)(imm), (__mmask8)(U), \
+                                              _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_fixupimm_round_ps(A, B, C, imm, R) __extension__ ({ \
+  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16si)(__m512i)(C), (int)(imm), \
+                                            (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) __extension__ ({ \
+  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16si)(__m512i)(C), (int)(imm), \
+                                            (__mmask16)(U), (int)(R)); })
+
+#define _mm512_fixupimm_ps(A, B, C, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16si)(__m512i)(C), (int)(imm), \
+                                            (__mmask16)-1, \
+                                            _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16si)(__m512i)(C), (int)(imm), \
+                                            (__mmask16)(U), \
+                                            _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) __extension__ ({ \
+  (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
+                                             (__v16sf)(__m512)(B), \
+                                             (__v16si)(__m512i)(C), \
+                                             (int)(imm), (__mmask16)(U), \
+                                             (int)(R)); })
+
+#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
+                                             (__v16sf)(__m512)(B), \
+                                             (__v16si)(__m512i)(C), \
+                                             (int)(imm), (__mmask16)(U), \
+                                             _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_fixupimm_round_sd(A, B, C, imm, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), \
+                                          (__v2di)(__m128i)(C), (int)(imm), \
+                                          (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), \
+                                          (__v2di)(__m128i)(C), (int)(imm), \
+                                          (__mmask8)(U), (int)(R)); })
+
+#define _mm_fixupimm_sd(A, B, C, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), \
+                                          (__v2di)(__m128i)(C), (int)(imm), \
+                                          (__mmask8)-1, \
+                                          _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_fixupimm_sd(A, U, B, C, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), \
+                                          (__v2di)(__m128i)(C), (int)(imm), \
+                                          (__mmask8)(U), \
+                                          _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2di)(__m128i)(C), (int)(imm), \
+                                           (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2di)(__m128i)(C), (int)(imm), \
+                                           (__mmask8)(U), \
+                                           _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_fixupimm_round_ss(A, B, C, imm, R) __extension__ ({ \
+  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), \
+                                         (__v4si)(__m128i)(C), (int)(imm), \
+                                         (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) __extension__ ({ \
+  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), \
+                                         (__v4si)(__m128i)(C), (int)(imm), \
+                                         (__mmask8)(U), (int)(R)); })
+
+#define _mm_fixupimm_ss(A, B, C, imm) __extension__ ({ \
+  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), \
+                                         (__v4si)(__m128i)(C), (int)(imm), \
+                                         (__mmask8)-1, \
+                                         _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_fixupimm_ss(A, U, B, C, imm) __extension__ ({ \
+  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), \
+                                         (__v4si)(__m128i)(C), (int)(imm), \
+                                         (__mmask8)(U), \
+                                         _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) __extension__ ({ \
+  (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4si)(__m128i)(C), (int)(imm), \
+                                          (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) __extension__ ({ \
+  (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4si)(__m128i)(C), (int)(imm), \
+                                          (__mmask8)(U), \
+                                          _MM_FROUND_CUR_DIRECTION); })
+
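+/* getexp_sd/ss: write the exponent of the low element of __B, as a
+ * floating-point value (floor(log2(|x|))), into the low lane of the result;
+ * the upper lanes are copied from __A. */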
+#define _mm_getexp_round_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)_mm_setzero_pd(), \
+                                                 (__mmask8)-1, (int)(R)); })
+
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_getexp_sd (__m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
+                 (__v2df) __B, (__v2df) _mm_setzero_pd(), (__mmask8) -1,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_getexp_round_sd(W, U, A, B, R) __extension__ ({\
+  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)(__m128d)(W), \
+                                                 (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) _mm_setzero_pd (),
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_getexp_round_sd(U, A, B, R) __extension__ ({\
+  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)_mm_setzero_pd(), \
+                                                 (__mmask8)(U), (int)(R)); })
+
+#define _mm_getexp_round_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)_mm_setzero_ps(), \
+                                                (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_getexp_ss (__m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
+                (__v4sf) __B, (__v4sf) _mm_setzero_ps(), (__mmask8) -1,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_getexp_round_ss(W, U, A, B, R) __extension__ ({\
+  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)(__m128)(W), \
+                                                (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) _mm_setzero_ps (),
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_getexp_round_ss(U, A, B, R) __extension__ ({\
+  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)_mm_setzero_ps(), \
+                                                (__mmask8)(U), (int)(R)); })
+
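+/* getmant_sd/ss: extract the normalized mantissa of the low element of __B;
+ * C selects the normalization interval (_MM_MANT_NORM_*) and D the sign
+ * control (_MM_MANT_SIGN_*), packed as (D << 2) | C.  Upper lanes come
+ * from __A. */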
+#define _mm_getmant_round_sd(A, B, C, D, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)-1, (int)(R)); })
+
+#define _mm_getmant_sd(A, B, C, D)  __extension__ ({ \
+  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)-1, \
+                                               _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_getmant_sd(W, U, A, B, C, D) __extension__ ({\
+  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v2df)(__m128d)(W), \
+                                               (__mmask8)(U), \
+                                               _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) __extension__ ({\
+  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v2df)(__m128d)(W), \
+                                               (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_getmant_sd(U, A, B, C, D) __extension__ ({\
+  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)(U), \
+                                               _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) __extension__ ({\
+  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)(U), (int)(R)); })
+
+#define _mm_getmant_round_ss(A, B, C, D, R) __extension__ ({ \
+  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (int)(((D)<<2) | (C)), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)-1, (int)(R)); })
+
+#define _mm_getmant_ss(A, B, C, D) __extension__ ({ \
+  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (int)(((D)<<2) | (C)), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)-1, \
+                                              _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_getmant_ss(W, U, A, B, C, D) __extension__ ({\
+  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (int)(((D)<<2) | (C)), \
+                                              (__v4sf)(__m128)(W), \
+                                              (__mmask8)(U), \
+                                              _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) __extension__ ({\
+  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (int)(((D)<<2) | (C)), \
+                                              (__v4sf)(__m128)(W), \
+                                              (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_getmant_ss(U, A, B, C, D) __extension__ ({\
+  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (int)(((D)<<2) | (C)), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)(U), \
+                                              _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) __extension__ ({\
+  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (int)(((D)<<2) | (C)), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)(U), (int)(R)); })
+
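+/* kmov copies a 16-bit mask value; comi_round compares the low scalar
+ * elements with predicate P under the exception-suppression control R and
+ * returns the result as an int. */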
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kmov (__mmask16 __A)
+{
+  return  __A;
+}
+
+#define _mm_comi_round_sd(A, B, P, R) __extension__ ({\
+  (int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
+                              (int)(P), (int)(R)); })
+
+#define _mm_comi_round_ss(A, B, P, R) __extension__ ({\
+  (int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
+                              (int)(P), (int)(R)); })
+
+#define _mm_cvt_roundsd_si64(A, R) __extension__ ({ \
+  (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+
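+/* Two-source permute: select 32-bit elements from __A and __B using the
+ * index vector __I; in the _mask2_ form, lanes with a clear mask bit keep
+ * the corresponding element of __I. */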
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I,
+         __mmask16 __U, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermi2vard512_mask ((__v16si) __A,
+                   (__v16si) __I /* idx */,
+                   (__v16si) __B,
+                   (__mmask16) __U);
+}
+
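+/* Shifts by a single count held in the low 64 bits of __B (sll/sra/srl) and
+ * by per-element counts in __Y (sllv/srav/srlv), in logical and arithmetic
+ * right-shift forms. */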
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sll_epi32 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
+             (__v4si) __B,
+             (__v16si)
+             _mm512_setzero_si512 (),
+             (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sll_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
+             (__v4si) __B,
+             (__v16si) __W,
+             (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sll_epi32 (__mmask16 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
+             (__v4si) __B,
+             (__v16si)
+             _mm512_setzero_si512 (),
+             (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sll_epi64 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
+             (__v2di) __B,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sll_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
+             (__v2di) __B,
+             (__v8di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sll_epi64 (__mmask8 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
+             (__v2di) __B,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sllv_epi32 (__m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
+              (__v16si) __Y,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sllv_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
+              (__v16si) __Y,
+              (__v16si) __W,
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sllv_epi32 (__mmask16 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
+              (__v16si) __Y,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sllv_epi64 (__m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
+             (__v8di) __Y,
+             (__v8di)
+             _mm512_undefined_pd (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sllv_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
+             (__v8di) __Y,
+             (__v8di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sllv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
+             (__v8di) __Y,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sra_epi32 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
+             (__v4si) __B,
+             (__v16si)
+             _mm512_setzero_si512 (),
+             (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sra_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
+             (__v4si) __B,
+             (__v16si) __W,
+             (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sra_epi32 (__mmask16 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
+             (__v4si) __B,
+             (__v16si)
+             _mm512_setzero_si512 (),
+             (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sra_epi64 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
+             (__v2di) __B,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sra_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
+             (__v2di) __B,
+             (__v8di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sra_epi64 (__mmask8 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
+             (__v2di) __B,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srav_epi32 (__m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
+              (__v16si) __Y,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srav_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
+              (__v16si) __Y,
+              (__v16si) __W,
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srav_epi32 (__mmask16 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
+              (__v16si) __Y,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srav_epi64 (__m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
+             (__v8di) __Y,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srav_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
+             (__v8di) __Y,
+             (__v8di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srav_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
+             (__v8di) __Y,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srl_epi32 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
+             (__v4si) __B,
+             (__v16si)
+             _mm512_setzero_si512 (),
+             (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srl_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
+             (__v4si) __B,
+             (__v16si) __W,
+             (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srl_epi32 (__mmask16 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
+             (__v4si) __B,
+             (__v16si)
+             _mm512_setzero_si512 (),
+             (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srl_epi64 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
+             (__v2di) __B,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srl_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
+             (__v2di) __B,
+             (__v8di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srl_epi64 (__mmask8 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
+             (__v2di) __B,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srlv_epi32 (__m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
+              (__v16si) __Y,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srlv_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
+              (__v16si) __Y,
+              (__v16si) __W,
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srlv_epi32 (__mmask16 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
+              (__v16si) __Y,
+              (__v16si)
+              _mm512_setzero_si512 (),
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srlv_epi64 (__m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
+             (__v8di) __Y,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srlv_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
+             (__v8di) __Y,
+             (__v8di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srlv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
+             (__v8di) __Y,
+             (__v8di)
+             _mm512_setzero_si512 (),
+             (__mmask8) __U);
+}
+
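+/* ternarylogic: for every bit position, imm is treated as an 8-entry truth
+ * table indexed by the corresponding bits of A, B and C. */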
+#define _mm512_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
+                                            (__v16si)(__m512i)(B), \
+                                            (__v16si)(__m512i)(C), (int)(imm), \
+                                            (__mmask16)-1); })
+
+#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
+                                            (__v16si)(__m512i)(B), \
+                                            (__v16si)(__m512i)(C), (int)(imm), \
+                                            (__mmask16)(U)); })
+
+#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
+                                             (__v16si)(__m512i)(B), \
+                                             (__v16si)(__m512i)(C), \
+                                             (int)(imm), (__mmask16)(U)); })
+
+#define _mm512_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
+                                            (__v8di)(__m512i)(B), \
+                                            (__v8di)(__m512i)(C), (int)(imm), \
+                                            (__mmask8)-1); })
+
+#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
+                                            (__v8di)(__m512i)(B), \
+                                            (__v8di)(__m512i)(C), (int)(imm), \
+                                            (__mmask8)(U)); })
+
+#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
+                                             (__v8di)(__m512i)(B), \
+                                             (__v8di)(__m512i)(C), (int)(imm), \
+                                             (__mmask8)(U)); })
+
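+/* Scalar double/float to integer conversions with an explicit rounding mode
+ * (cvt_round*) or with truncation (cvtt_round*); the non-round inline forms
+ * use _MM_FROUND_CUR_DIRECTION. */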
+#define _mm_cvt_roundsd_i64(A, R) __extension__ ({ \
+  (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+
+#define _mm_cvt_roundsd_si32(A, R) __extension__ ({ \
+  (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+
+#define _mm_cvt_roundsd_i32(A, R) __extension__ ({ \
+  (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+
+#define _mm_cvt_roundsd_u32(A, R) __extension__ ({ \
+  (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)); })
+
+static __inline__ unsigned __DEFAULT_FN_ATTRS
+_mm_cvtsd_u32 (__m128d __A)
+{
+  return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A,
+             _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundsd_u64(A, R) __extension__ ({ \
+  (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
+                                                  (int)(R)); })
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_cvtsd_u64 (__m128d __A)
+{
+  return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df)
+                 __A,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundss_si32(A, R) __extension__ ({ \
+  (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvt_roundss_i32(A, R) __extension__ ({ \
+  (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvt_roundss_si64(A, R) __extension__ ({ \
+  (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvt_roundss_i64(A, R) __extension__ ({ \
+  (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvt_roundss_u32(A, R) __extension__ ({ \
+  (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)); })
+
+static __inline__ unsigned __DEFAULT_FN_ATTRS
+_mm_cvtss_u32 (__m128 __A)
+{
+  return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A,
+             _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundss_u64(A, R) __extension__ ({ \
+  (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
+                                                  (int)(R)); })
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_cvtss_u64 (__m128 __A)
+{
+  return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf) __A,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundsd_i32(A, R) __extension__ ({ \
+  (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+
+#define _mm_cvtt_roundsd_si32(A, R) __extension__ ({ \
+  (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvttsd_i32 (__m128d __A)
+{
+  return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A,
+              _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundsd_si64(A, R) __extension__ ({ \
+  (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+
+#define _mm_cvtt_roundsd_i64(A, R) __extension__ ({ \
+  (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvttsd_i64 (__m128d __A)
+{
+  return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
+              _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundsd_u32(A, R) __extension__ ({ \
+  (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)); })
+
+static __inline__ unsigned __DEFAULT_FN_ATTRS
+_mm_cvttsd_u32 (__m128d __A)
+{
+  return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A,
+              _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundsd_u64(A, R) __extension__ ({ \
+  (unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
+                                                   (int)(R)); })
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_cvttsd_u64 (__m128d __A)
+{
+  return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df) __A,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundss_i32(A, R) __extension__ ({ \
+  (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvtt_roundss_si32(A, R) __extension__ ({ \
+  (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); })
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvttss_i32 (__m128 __A)
+{
+  return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A,
+              _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundss_i64(A, R) __extension__ ({ \
+  (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvtt_roundss_si64(A, R) __extension__ ({ \
+  (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); })
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvttss_i64 (__m128 __A)
+{
+  return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
+              _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundss_u32(A, R) __extension__ ({ \
+  (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)); })
+
+static __inline__ unsigned __DEFAULT_FN_ATTRS
+_mm_cvttss_u32 (__m128 __A)
+{
+  return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A,
+              _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundss_u64(A, R) __extension__ ({ \
+  (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
+                                                   (int)(R)); })
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_cvttss_u64 (__m128 __A)
+{
+  return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf) __A,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
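+/* Two-source permutes (VPERMI2PD/PS/Q): __I indexes into the concatenation
+ * of __A and __B.  In the mask2_ forms, lanes whose mask bit is clear keep
+ * the value of the index vector __I. */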
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_pd (__m512d __A, __m512i __I, __mmask8 __U,
+            __m512d __B)
+{
+  return (__m512d) __builtin_ia32_vpermi2varpd512_mask ((__v8df) __A,
+              (__v8di) __I
+              /* idx */ ,
+              (__v8df) __B,
+              (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_ps (__m512 __A, __m512i __I, __mmask16 __U,
+            __m512 __B)
+{
+  return (__m512) __builtin_ia32_vpermi2varps512_mask ((__v16sf) __A,
+                   (__v16si) __I
+                   /* idx */ ,
+                   (__v16sf) __B,
+                   (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_epi64 (__m512i __A, __m512i __I,
+         __mmask8 __U, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermi2varq512_mask ((__v8di) __A,
+                   (__v8di) __I
+                   /* idx */ ,
+                   (__v8di) __B,
+                   (__mmask8) __U);
+}
+
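+/* Immediate in-lane permutes (VPERMILPD/VPERMILPS expressed via
+ * __builtin_shufflevector): each field of C selects an element within its
+ * own 128-bit lane. */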
+#define _mm512_permute_pd(X, C) __extension__ ({ \
+  (__m512d)__builtin_shufflevector((__v8df)(__m512d)(X), \
+                                   (__v8df)_mm512_undefined_pd(), \
+                                   0 + (((C) >> 0) & 0x1), \
+                                   0 + (((C) >> 1) & 0x1), \
+                                   2 + (((C) >> 2) & 0x1), \
+                                   2 + (((C) >> 3) & 0x1), \
+                                   4 + (((C) >> 4) & 0x1), \
+                                   4 + (((C) >> 5) & 0x1), \
+                                   6 + (((C) >> 6) & 0x1), \
+                                   6 + (((C) >> 7) & 0x1)); })
+
+#define _mm512_mask_permute_pd(W, U, X, C) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_permute_pd((X), (C)), \
+                                       (__v8df)(__m512d)(W)); })
+
+#define _mm512_maskz_permute_pd(U, X, C) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_permute_pd((X), (C)), \
+                                       (__v8df)_mm512_setzero_pd()); })
+
+#define _mm512_permute_ps(X, C) __extension__ ({ \
+  (__m512)__builtin_shufflevector((__v16sf)(__m512)(X), \
+                                  (__v16sf)_mm512_undefined_ps(), \
+                                   0  + (((C) >> 0) & 0x3), \
+                                   0  + (((C) >> 2) & 0x3), \
+                                   0  + (((C) >> 4) & 0x3), \
+                                   0  + (((C) >> 6) & 0x3), \
+                                   4  + (((C) >> 0) & 0x3), \
+                                   4  + (((C) >> 2) & 0x3), \
+                                   4  + (((C) >> 4) & 0x3), \
+                                   4  + (((C) >> 6) & 0x3), \
+                                   8  + (((C) >> 0) & 0x3), \
+                                   8  + (((C) >> 2) & 0x3), \
+                                   8  + (((C) >> 4) & 0x3), \
+                                   8  + (((C) >> 6) & 0x3), \
+                                   12 + (((C) >> 0) & 0x3), \
+                                   12 + (((C) >> 2) & 0x3), \
+                                   12 + (((C) >> 4) & 0x3), \
+                                   12 + (((C) >> 6) & 0x3)); })
+
+#define _mm512_mask_permute_ps(W, U, X, C) __extension__ ({ \
+  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                      (__v16sf)_mm512_permute_ps((X), (C)), \
+                                      (__v16sf)(__m512)(W)); })
+
+#define _mm512_maskz_permute_ps(U, X, C) __extension__ ({ \
+  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                      (__v16sf)_mm512_permute_ps((X), (C)), \
+                                      (__v16sf)_mm512_setzero_ps()); })
+
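+/* Variable in-lane permutes: the control vector __C selects elements within
+ * each 128-bit lane. */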
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_permutevar_pd (__m512d __A, __m512i __C)
+{
+  return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
+              (__v8di) __C,
+              (__v8df)
+              _mm512_undefined_pd (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_permutevar_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
+{
+  return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
+              (__v8di) __C,
+              (__v8df) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_permutevar_pd (__mmask8 __U, __m512d __A, __m512i __C)
+{
+  return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
+              (__v8di) __C,
+              (__v8df)
+              _mm512_setzero_pd (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_permutevar_ps (__m512 __A, __m512i __C)
+{
+  return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
+                   (__v16si) __C,
+                   (__v16sf)
+                   _mm512_undefined_ps (),
+                   (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_permutevar_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
+{
+  return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
+                   (__v16si) __C,
+                   (__v16sf) __W,
+                   (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_permutevar_ps (__mmask16 __U, __m512 __A, __m512i __C)
+{
+  return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
+                   (__v16si) __C,
+                   (__v16sf)
+                   _mm512_setzero_ps (),
+                   (__mmask16) __U);
+}
+
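+/* Full two-source permutes (VPERMT2PD/PS): the merge-masked forms keep the
+ * lanes of __A where the mask bit is clear. */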
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
+                    /* idx */ ,
+                    (__v8df) __A,
+                    (__v8df) __B,
+                    (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_pd (__m512d __A, __mmask8 __U, __m512i __I, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
+                    /* idx */ ,
+                    (__v8df) __A,
+                    (__v8df) __B,
+                    (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_pd (__mmask8 __U, __m512d __A, __m512i __I,
+            __m512d __B)
+{
+  return (__m512d) __builtin_ia32_vpermt2varpd512_maskz ((__v8di) __I
+                                                         /* idx */ ,
+                                                         (__v8df) __A,
+                                                         (__v8df) __B,
+                                                         (__mmask8) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
+{
+  return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
+                                                         /* idx */ ,
+                                                         (__v16sf) __A,
+                                                         (__v16sf) __B,
+                                                         (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_ps (__m512 __A, __mmask16 __U, __m512i __I, __m512 __B)
+{
+  return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
+                                                         /* idx */ ,
+                                                         (__v16sf) __A,
+                                                         (__v16sf) __B,
+                                                         (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_ps (__mmask16 __U, __m512 __A, __m512i __I,
+            __m512 __B)
+{
+  return (__m512) __builtin_ia32_vpermt2varps512_maskz ((__v16si) __I
+                                                        /* idx */ ,
+                                                        (__v16sf) __A,
+                                                        (__v16sf) __B,
+                                                        (__mmask16) __U);
+}
+
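+/* VPTESTNM: each mask bit is set when the bitwise AND of the corresponding
+ * elements of __A and __B is zero. */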
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_testn_epi32_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestnmd512 ((__v16si) __A,
+             (__v16si) __B,
+             (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestnmd512 ((__v16si) __A,
+             (__v16si) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_testn_epi64_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmq512 ((__v8di) __A,
+            (__v8di) __B,
+            (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmq512 ((__v8di) __A,
+            (__v8di) __B, __U);
+}
+
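+/* Truncating conversions from packed doubles to unsigned 32-bit integers
+ * (VCVTTPD2UDQ). */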
+#define _mm512_cvtt_roundpd_epu32(A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8si)_mm256_undefined_si256(), \
+                                             (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8si)(__m256i)(W), \
+                                             (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8si)_mm256_setzero_si256(), \
+                                             (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epu32 (__m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
+                  (__v8si)
+                  _mm256_undefined_si256 (),
+                  (__mmask8) -1,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
+                  (__v8si) __W,
+                  (__mmask8) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
+                  (__v8si)
+                  _mm256_setzero_si256 (),
+                  (__mmask8) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
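+/* Scalar roundscale: rounds the low element of B to the number of fraction
+ * bits encoded in imm and copies the upper element(s) from A. */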
+#define _mm_roundscale_round_sd(A, B, imm, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)-1, (int)(imm), \
+                                                (int)(R)); })
+
+#define _mm_roundscale_sd(A, B, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)-1, (int)(imm), \
+                                                _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_roundscale_sd(W, U, A, B, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)(__m128d)(W), \
+                                                (__mmask8)(U), (int)(imm), \
+                                                _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)(__m128d)(W), \
+                                                (__mmask8)(U), (int)(I), \
+                                                (int)(R)); })
+
+#define _mm_maskz_roundscale_sd(U, A, B, I) __extension__ ({ \
+  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)(U), (int)(I), \
+                                                _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)(U), (int)(I), \
+                                                (int)(R)); })
+
+#define _mm_roundscale_round_ss(A, B, imm, R) __extension__ ({ \
+  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)-1, (int)(imm), \
+                                               (int)(R)); })
+
+#define _mm_roundscale_ss(A, B, imm) __extension__ ({ \
+  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)-1, (int)(imm), \
+                                               _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_roundscale_ss(W, U, A, B, I) __extension__ ({ \
+  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)(__m128)(W), \
+                                               (__mmask8)(U), (int)(I), \
+                                               _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) __extension__ ({ \
+  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)(__m128)(W), \
+                                               (__mmask8)(U), (int)(I), \
+                                               (int)(R)); })
+
+#define _mm_maskz_roundscale_ss(U, A, B, I) __extension__ ({ \
+  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)(U), (int)(I), \
+                                               _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) __extension__ ({ \
+  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)(U), (int)(I), \
+                                               (int)(R)); })
+
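+/* VSCALEF: each result element is A * 2^floor(B). */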
+#define _mm512_scalef_round_pd(A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), \
+                                           (__v8df)_mm512_undefined_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_scalef_round_pd(W, U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), \
+                                           (__v8df)(__m512d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_scalef_round_pd(U, A, B, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_scalef_pd (__m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
+                (__v8df) __B,
+                (__v8df)
+                _mm512_undefined_pd (),
+                (__mmask8) -1,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
+                (__v8df) __B,
+                (__v8df) __W,
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
+                (__v8df) __B,
+                (__v8df)
+                _mm512_setzero_pd (),
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_scalef_round_ps(A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), \
+                                          (__v16sf)_mm512_undefined_ps(), \
+                                          (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_scalef_round_ps(W, U, A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), \
+                                          (__v16sf)(__m512)(W), \
+                                          (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_scalef_round_ps(U, A, B, R) __extension__ ({ \
+  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_scalef_ps (__m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
+               (__v16sf) __B,
+               (__v16sf)
+               _mm512_undefined_ps (),
+               (__mmask16) -1,
+               _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
+               (__v16sf) __B,
+               (__v16sf) __W,
+               (__mmask16) __U,
+               _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
+               (__v16sf) __B,
+               (__v16sf)
+               _mm512_setzero_ps (),
+               (__mmask16) __U,
+               _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_scalef_round_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v2df)_mm_setzero_pd(), \
+                                              (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_scalef_sd (__m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
+              (__v2df) __B, (__v2df) _mm_setzero_pd(),
+              (__mmask8) -1,
+              _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
+                (__v2df) __B,
+                (__v2df) __W,
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_scalef_round_sd(W, U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v2df)(__m128d)(W), \
+                                              (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
+                (__v2df) __B,
+                (__v2df) _mm_setzero_pd (),
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_scalef_round_sd(U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v2df)_mm_setzero_pd(), \
+                                              (__mmask8)(U), (int)(R)); })
+
+#define _mm_scalef_round_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+                                             (__v4sf)(__m128)(B), \
+                                             (__v4sf)_mm_setzero_ps(), \
+                                             (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_scalef_ss (__m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
+             (__v4sf) __B, (__v4sf) _mm_setzero_ps(),
+             (__mmask8) -1,
+             _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
+                (__v4sf) __B,
+                (__v4sf) __W,
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_scalef_round_ss(W, U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+                                             (__v4sf)(__m128)(B), \
+                                             (__v4sf)(__m128)(W), \
+                                             (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
+                (__v4sf) __B,
+                (__v4sf) _mm_setzero_ps (),
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_scalef_round_ss(U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+                                             (__v4sf)(__m128)(B), \
+                                             (__v4sf)_mm_setzero_ps(), \
+                                             (__mmask8)(U), (int)(R)); })
+
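+/* Arithmetic right shifts by an immediate count (VPSRAD/VPSRAQ). */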
+#define _mm512_srai_epi32(A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                         (__v16si)_mm512_setzero_si512(), \
+                                         (__mmask16)-1); })
+
+#define _mm512_mask_srai_epi32(W, U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                         (__v16si)(__m512i)(W), \
+                                         (__mmask16)(U)); })
+
+#define _mm512_maskz_srai_epi32(U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
+                                         (__v16si)_mm512_setzero_si512(), \
+                                         (__mmask16)(U)); })
+
+#define _mm512_srai_epi64(A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                         (__v8di)_mm512_setzero_si512(), \
+                                         (__mmask8)-1); })
+
+#define _mm512_mask_srai_epi64(W, U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                         (__v8di)(__m512i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm512_maskz_srai_epi64(U, A, B) __extension__ ({ \
+  (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+                                         (__v8di)_mm512_setzero_si512(), \
+                                         (__mmask8)(U)); })
+
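+/* 128-bit lane shuffles (VSHUFF32X4, VSHUFF64X2, VSHUFI32X4, VSHUFI64X2):
+ * imm selects which source lane fills each result lane; the low half of the
+ * result is taken from A, the high half from B. */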
+#define _mm512_shuffle_f32x4(A, B, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)(__m512)(B), (int)(imm), \
+                                         (__v16sf)_mm512_undefined_ps(), \
+                                         (__mmask16)-1); })
+
+#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)(__m512)(B), (int)(imm), \
+                                         (__v16sf)(__m512)(W), \
+                                         (__mmask16)(U)); })
+
+#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)(__m512)(B), (int)(imm), \
+                                         (__v16sf)_mm512_setzero_ps(), \
+                                         (__mmask16)(U)); })
+
+#define _mm512_shuffle_f64x2(A, B, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(imm), \
+                                          (__v8df)_mm512_undefined_pd(), \
+                                          (__mmask8)-1); })
+
+#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(imm), \
+                                          (__v8df)(__m512d)(W), \
+                                          (__mmask8)(U)); })
+
+#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(imm), \
+                                          (__v8df)_mm512_setzero_pd(), \
+                                          (__mmask8)(U)); })
+
+#define _mm512_shuffle_i32x4(A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \
+                                          (__v16si)(__m512i)(B), (int)(imm), \
+                                          (__v16si)_mm512_setzero_si512(), \
+                                          (__mmask16)-1); })
+
+#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \
+                                          (__v16si)(__m512i)(B), (int)(imm), \
+                                          (__v16si)(__m512i)(W), \
+                                          (__mmask16)(U)); })
+
+#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \
+                                          (__v16si)(__m512i)(B), (int)(imm), \
+                                          (__v16si)_mm512_setzero_si512(), \
+                                          (__mmask16)(U)); })
+
+#define _mm512_shuffle_i64x2(A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \
+                                          (__v8di)(__m512i)(B), (int)(imm), \
+                                          (__v8di)_mm512_setzero_si512(), \
+                                          (__mmask8)-1); })
+
+#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \
+                                          (__v8di)(__m512i)(B), (int)(imm), \
+                                          (__v8di)(__m512i)(W), \
+                                          (__mmask8)(U)); })
+
+#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \
+                                          (__v8di)(__m512i)(B), (int)(imm), \
+                                          (__v8di)_mm512_setzero_si512(), \
+                                          (__mmask8)(U)); })
+
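+/* SHUFPD/SHUFPS-style shuffles widened to 512 bits, performed independently
+ * within each 128-bit lane. */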
+#define _mm512_shuffle_pd(A, B, M) __extension__ ({ \
+  (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
+                                   (__v8df)(__m512d)(B), \
+                                   0  + (((M) >> 0) & 0x1), \
+                                   8  + (((M) >> 1) & 0x1), \
+                                   2  + (((M) >> 2) & 0x1), \
+                                   10 + (((M) >> 3) & 0x1), \
+                                   4  + (((M) >> 4) & 0x1), \
+                                   12 + (((M) >> 5) & 0x1), \
+                                   6  + (((M) >> 6) & 0x1), \
+                                   14 + (((M) >> 7) & 0x1)); })
+
+#define _mm512_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+                                       (__v8df)(__m512d)(W)); })
+
+#define _mm512_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+                                       (__v8df)_mm512_setzero_pd()); })
+
+#define _mm512_shuffle_ps(A, B, M) __extension__ ({ \
+  (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
+                                   (__v16sf)(__m512)(B), \
+                                   0  + (((M) >> 0) & 0x3), \
+                                   0  + (((M) >> 2) & 0x3), \
+                                   16 + (((M) >> 4) & 0x3), \
+                                   16 + (((M) >> 6) & 0x3), \
+                                   4  + (((M) >> 0) & 0x3), \
+                                   4  + (((M) >> 2) & 0x3), \
+                                   20 + (((M) >> 4) & 0x3), \
+                                   20 + (((M) >> 6) & 0x3), \
+                                   8  + (((M) >> 0) & 0x3), \
+                                   8  + (((M) >> 2) & 0x3), \
+                                   24 + (((M) >> 4) & 0x3), \
+                                   24 + (((M) >> 6) & 0x3), \
+                                   12 + (((M) >> 0) & 0x3), \
+                                   12 + (((M) >> 2) & 0x3), \
+                                   28 + (((M) >> 4) & 0x3), \
+                                   28 + (((M) >> 6) & 0x3)); })
+
+#define _mm512_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                      (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
+                                      (__v16sf)(__m512)(W)); })
+
+#define _mm512_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                      (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
+                                      (__v16sf)_mm512_setzero_ps()); })
+
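+/* Masked scalar square roots: the low element of B is rooted and the upper
+ * element(s) are copied from A. */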
+#define _mm_sqrt_round_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_sqrtsd_round_mask ((__v2df) __A,
+                (__v2df) __B,
+                (__v2df) __W,
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_sqrt_round_sd(W, U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)(__m128d)(W), \
+                                            (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_sqrtsd_round_mask ((__v2df) __A,
+                (__v2df) __B,
+                (__v2df) _mm_setzero_pd (),
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_sqrt_round_sd(U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)); })
+
+#define _mm_sqrt_round_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_sqrtss_round_mask ((__v4sf) __A,
+                (__v4sf) __B,
+                (__v4sf) __W,
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_sqrt_round_ss(W, U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                           (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_sqrtss_round_mask ((__v4sf) __A,
+                (__v4sf) __B,
+                (__v4sf) _mm_setzero_ps (),
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_sqrt_round_ss(U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)(U), (int)(R)); })
+
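+/* Broadcasts of a 128-bit or 256-bit block into every corresponding
+ * position of a 512-bit vector. */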
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_broadcast_f32x4 (__m128 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A,
+                 (__v16sf)
+                 _mm512_undefined_ps (),
+                 (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_f32x4 (__m512 __O, __mmask16 __M, __m128 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A,
+                 (__v16sf) __O,
+                 __M);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_f32x4 (__mmask16 __M, __m128 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A,
+                 (__v16sf)
+                 _mm512_setzero_ps (),
+                 __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_broadcast_f64x4 (__m256d __A)
+{
+  return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A,
+                  (__v8df)
+                  _mm512_undefined_pd (),
+                  (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_f64x4 (__m512d __O, __mmask8 __M, __m256d __A)
+{
+  return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A,
+                  (__v8df) __O,
+                  __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_f64x4 (__mmask8 __M, __m256d __A)
+{
+  return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A,
+                  (__v8df)
+                  _mm512_setzero_pd (),
+                  __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcast_i32x4 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A,
+                  (__v16si)
+                  _mm512_undefined_epi32 (),
+                  (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_i32x4 (__m512i __O, __mmask16 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A,
+                  (__v16si) __O,
+                  __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_i32x4 (__mmask16 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A,
+                  (__v16si)
+                  _mm512_setzero_si512 (),
+                  __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcast_i64x4 (__m256i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A,
+                  (__v8di)
+                  _mm512_undefined_epi32 (),
+                  (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_i64x4 (__m512i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A,
+                  (__v8di) __O,
+                  __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_i64x4 (__mmask8 __M, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A,
+                  (__v8di)
+                  _mm512_setzero_si512 (),
+                  __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A)
+{
+  return (__m512d)__builtin_ia32_selectpd_512(__M,
+                                              (__v8df) _mm512_broadcastsd_pd(__A),
+                                              (__v8df) __O);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
+{
+  return (__m512d)__builtin_ia32_selectpd_512(__M,
+                                              (__v8df) _mm512_broadcastsd_pd(__A),
+                                              (__v8df) _mm512_setzero_pd());
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A)
+{
+  return (__m512)__builtin_ia32_selectps_512(__M,
+                                             (__v16sf) _mm512_broadcastss_ps(__A),
+                                             (__v16sf) __O);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A)
+{
+  return (__m512)__builtin_ia32_selectps_512(__M,
+                                             (__v16sf) _mm512_broadcastss_ps(__A),
+                                             (__v16sf) _mm512_setzero_ps());
+}
+
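+/* Narrowing integer conversions with signed saturation (VPMOVSDB, VPMOVSDW,
+ * VPMOVSQB, VPMOVSQD, VPMOVSQW); the _storeu_ forms write the narrowed
+ * elements directly to memory under the mask. */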
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi32_epi8 (__m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
+               (__v16qi) _mm_undefined_si128 (),
+               (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
+               (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
+               (__v16qi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
+{
+  __builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi32_epi16 (__m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
+               (__v16hi) _mm256_undefined_si256 (),
+               (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
+               (__v16hi) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
+               (__v16hi) _mm256_setzero_si256 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
+{
+  __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi64_epi8 (__m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
+               (__v16qi) _mm_undefined_si128 (),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
+               (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
+               (__v16qi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
+{
+  __builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi64_epi32 (__m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
+               (__v8si) _mm256_undefined_si256 (),
+               (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
+               (__v8si) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
+               (__v8si) _mm256_setzero_si256 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A)
+{
+  __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi64_epi16 (__m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
+               (__v8hi) _mm_undefined_si128 (),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
+               (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
+               (__v8hi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A)
+{
+  __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
+}
+
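+/* Narrowing integer conversions with unsigned saturation (VPMOVUSDB and
+ * friends). */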
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi32_epi8 (__m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
+                (__v16qi) _mm_undefined_si128 (),
+                (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
+                (__v16qi) __O,
+                __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
+                (__v16qi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
+{
+  __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi32_epi16 (__m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
+                (__v16hi) _mm256_undefined_si256 (),
+                (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
+                (__v16hi) __O,
+                __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
+                (__v16hi) _mm256_setzero_si256 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
+{
+  __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi64_epi8 (__m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
+                (__v16qi) _mm_undefined_si128 (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
+                (__v16qi) __O,
+                __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
+                (__v16qi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
+{
+  __builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi64_epi32 (__m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
+                (__v8si) _mm256_undefined_si256 (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
+                (__v8si) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
+                (__v8si) _mm256_setzero_si256 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
+{
+  __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi64_epi16 (__m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
+                (__v8hi) _mm_undefined_si128 (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
+                (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
+                (__v8hi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
+{
+  __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M);
+}
+
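+/* Narrowing integer conversions by plain truncation (VPMOVDB, VPMOVDW,
+ * VPMOVQB, VPMOVQD, VPMOVQW). */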
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_epi8 (__m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
+              (__v16qi) _mm_undefined_si128 (),
+              (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
+              (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
+              (__v16qi) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
+{
+  __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_epi16 (__m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
+              (__v16hi) _mm256_undefined_si256 (),
+              (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
+              (__v16hi) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
+              (__v16hi) _mm256_setzero_si256 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A)
+{
+  __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_epi8 (__m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
+              (__v16qi) _mm_undefined_si128 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
+              (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
+              (__v16qi) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
+{
+  __builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_epi32 (__m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
+              (__v8si) _mm256_undefined_si256 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
+              (__v8si) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
+              (__v8si) _mm256_setzero_si256 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
+{
+  __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_epi16 (__m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
+              (__v8hi) _mm_undefined_si128 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
+              (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
+              (__v8hi) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
+{
+  __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
+}
+
+#define _mm512_extracti32x4_epi32(A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                            (__v4si)_mm_undefined_si128(), \
+                                            (__mmask8)-1); })
+
+#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                            (__v4si)(__m128i)(W), \
+                                            (__mmask8)(U)); })
+
+#define _mm512_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                            (__v4si)_mm_setzero_si128(), \
+                                            (__mmask8)(U)); })
+
+#define _mm512_extracti64x4_epi64(A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+                                            (__v4di)_mm256_undefined_si256(), \
+                                            (__mmask8)-1); })
+
+#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+                                            (__v4di)(__m256i)(W), \
+                                            (__mmask8)(U)); })
+
+#define _mm512_maskz_extracti64x4_epi64(U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+                                            (__v4di)_mm256_setzero_si256(), \
+                                            (__mmask8)(U)); })
+
+#define _mm512_insertf64x4(A, B, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
+                                           (__v4df)(__m256d)(B), (int)(imm), \
+                                           (__v8df)_mm512_undefined_pd(), \
+                                           (__mmask8)-1); })
+
+#define _mm512_mask_insertf64x4(W, U, A, B, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
+                                           (__v4df)(__m256d)(B), (int)(imm), \
+                                           (__v8df)(__m512d)(W), \
+                                           (__mmask8)(U)); })
+
+#define _mm512_maskz_insertf64x4(U, A, B, imm) __extension__ ({ \
+  (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
+                                           (__v4df)(__m256d)(B), (int)(imm), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(U)); })
+
+#define _mm512_inserti64x4(A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
+                                           (__v4di)(__m256i)(B), (int)(imm), \
+                                           (__v8di)_mm512_setzero_si512(), \
+                                           (__mmask8)-1); })
+
+#define _mm512_mask_inserti64x4(W, U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
+                                           (__v4di)(__m256i)(B), (int)(imm), \
+                                           (__v8di)(__m512i)(W), \
+                                           (__mmask8)(U)); })
+
+#define _mm512_maskz_inserti64x4(U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
+                                           (__v4di)(__m256i)(B), (int)(imm), \
+                                           (__v8di)_mm512_setzero_si512(), \
+                                           (__mmask8)(U)); })
+
+#define _mm512_insertf32x4(A, B, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
+                                          (__v4sf)(__m128)(B), (int)(imm), \
+                                          (__v16sf)_mm512_undefined_ps(), \
+                                          (__mmask16)-1); })
+
+#define _mm512_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
+                                          (__v4sf)(__m128)(B), (int)(imm), \
+                                          (__v16sf)(__m512)(W), \
+                                          (__mmask16)(U)); })
+
+#define _mm512_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \
+  (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
+                                          (__v4sf)(__m128)(B), (int)(imm), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(U)); })
+
+#define _mm512_inserti32x4(A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
+                                           (__v4si)(__m128i)(B), (int)(imm), \
+                                           (__v16si)_mm512_setzero_si512(), \
+                                           (__mmask16)-1); })
+
+#define _mm512_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
+                                           (__v4si)(__m128i)(B), (int)(imm), \
+                                           (__v16si)(__m512i)(W), \
+                                           (__mmask16)(U)); })
+
+#define _mm512_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \
+  (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
+                                           (__v4si)(__m128i)(B), (int)(imm), \
+                                           (__v16si)_mm512_setzero_si512(), \
+                                           (__mmask16)(U)); })
+
+#define _mm512_getmant_round_pd(A, B, C, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v8df)_mm512_undefined_pd(), \
+                                            (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v8df)(__m512d)(W), \
+                                            (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)); })
+
+#define _mm512_getmant_pd(A, B, C) __extension__ ({ \
+  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)-1, \
+                                            _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_getmant_pd(W, U, A, B, C) __extension__ ({ \
+  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v8df)(__m512d)(W), \
+                                            (__mmask8)(U), \
+                                            _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_getmant_pd(U, A, B, C) __extension__ ({ \
+  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)(U), \
+                                            _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_getmant_round_ps(A, B, C, R) __extension__ ({ \
+  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                           (int)(((C)<<2) | (B)), \
+                                           (__v16sf)_mm512_undefined_ps(), \
+                                           (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) __extension__ ({ \
+  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                           (int)(((C)<<2) | (B)), \
+                                           (__v16sf)(__m512)(W), \
+                                           (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) __extension__ ({ \
+  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                           (int)(((C)<<2) | (B)), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)(U), (int)(R)); })
+
+#define _mm512_getmant_ps(A, B, C) __extension__ ({ \
+  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                           (int)(((C)<<2)|(B)), \
+                                           (__v16sf)_mm512_undefined_ps(), \
+                                           (__mmask16)-1, \
+                                           _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \
+  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                           (int)(((C)<<2)|(B)), \
+                                           (__v16sf)(__m512)(W), \
+                                           (__mmask16)(U), \
+                                           _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_getmant_ps(U, A, B, C) __extension__ ({ \
+  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                           (int)(((C)<<2)|(B)), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)(U), \
+                                           _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_getexp_round_pd(A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)_mm512_undefined_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_getexp_round_pd(W, U, A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_getexp_round_pd(U, A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_getexp_pd (__m512d __A)
+{
+  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
+                (__v8df) _mm512_undefined_pd (),
+                (__mmask8) -1,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
+                (__v8df) __W,
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
+                (__v8df) _mm512_setzero_pd (),
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_getexp_round_ps(A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)_mm512_undefined_ps(), \
+                                          (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_getexp_round_ps(W, U, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(W), \
+                                          (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_getexp_round_ps(U, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_getexp_ps (__m512 __A)
+{
+  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
+               (__v16sf) _mm512_undefined_ps (),
+               (__mmask16) -1,
+               _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
+               (__v16sf) __W,
+               (__mmask16) __U,
+               _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
+               (__v16sf) _mm512_setzero_ps (),
+               (__mmask16) __U,
+               _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_i64gather_ps(index, addr, scale) __extension__ ({ \
+  (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
+                                       (float const *)(addr), \
+                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
+                                       (int)(scale)); })
+
+#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old), \
+                                       (float const *)(addr), \
+                                       (__v8di)(__m512i)(index), \
+                                       (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i64gather_epi32(index, addr, scale) __extension__ ({\
+  (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
+                                        (int const *)(addr), \
+                                        (__v8di)(__m512i)(index), \
+                                        (__mmask8)-1, (int)(scale)); })
+
+#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
+                                        (int const *)(addr), \
+                                        (__v8di)(__m512i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i64gather_pd(index, addr, scale) __extension__ ({\
+  (__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
+                                       (double const *)(addr), \
+                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
+                                       (int)(scale)); })
+
+#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
+                                       (double const *)(addr), \
+                                       (__v8di)(__m512i)(index), \
+                                       (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i64gather_epi64(index, addr, scale) __extension__ ({\
+  (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
+                                       (long long const *)(addr), \
+                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
+                                       (int)(scale)); })
+
+#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
+                                       (long long const *)(addr), \
+                                       (__v8di)(__m512i)(index), \
+                                       (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i32gather_ps(index, addr, scale) __extension__ ({\
+  (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
+                                       (float const *)(addr), \
+                                       (__v16si)(__m512i)(index), \
+                                       (__mmask16)-1, (int)(scale)); })
+
+#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
+                                       (float const *)(addr), \
+                                       (__v16si)(__m512i)(index), \
+                                       (__mmask16)(mask), (int)(scale)); })
+
+#define _mm512_i32gather_epi32(index, addr, scale) __extension__ ({\
+  (__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
+                                        (int const *)(addr), \
+                                        (__v16si)(__m512i)(index), \
+                                        (__mmask16)-1, (int)(scale)); })
+
+#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
+                                        (int const *)(addr), \
+                                        (__v16si)(__m512i)(index), \
+                                        (__mmask16)(mask), (int)(scale)); })
+
+#define _mm512_i32gather_pd(index, addr, scale) __extension__ ({\
+  (__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
+                                       (double const *)(addr), \
+                                       (__v8si)(__m256i)(index), (__mmask8)-1, \
+                                       (int)(scale)); })
+
+#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
+                                       (double const *)(addr), \
+                                       (__v8si)(__m256i)(index), \
+                                       (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i32gather_epi64(index, addr, scale) __extension__ ({\
+  (__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
+                                       (long long const *)(addr), \
+                                       (__v8si)(__m256i)(index), (__mmask8)-1, \
+                                       (int)(scale)); })
+
+#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
+                                       (long long const *)(addr), \
+                                       (__v8si)(__m256i)(index), \
+                                       (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i64scatter_ps(addr, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)-1, \
+                                (__v8di)(__m512i)(index), \
+                                (__v8sf)(__m256)(v1), (int)(scale)); })
+
+#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)(mask), \
+                                (__v8di)(__m512i)(index), \
+                                (__v8sf)(__m256)(v1), (int)(scale)); })
+
+#define _mm512_i64scatter_epi32(addr, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)-1, \
+                                (__v8di)(__m512i)(index), \
+                                (__v8si)(__m256i)(v1), (int)(scale)); })
+
+#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)(mask), \
+                                (__v8di)(__m512i)(index), \
+                                (__v8si)(__m256i)(v1), (int)(scale)); })
+
+#define _mm512_i64scatter_pd(addr, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)-1, \
+                               (__v8di)(__m512i)(index), \
+                               (__v8df)(__m512d)(v1), (int)(scale)); })
+
+#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)(mask), \
+                               (__v8di)(__m512i)(index), \
+                               (__v8df)(__m512d)(v1), (int)(scale)); })
+
+#define _mm512_i64scatter_epi64(addr, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)-1, \
+                               (__v8di)(__m512i)(index), \
+                               (__v8di)(__m512i)(v1), (int)(scale)); })
+
+#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)(mask), \
+                               (__v8di)(__m512i)(index), \
+                               (__v8di)(__m512i)(v1), (int)(scale)); })
+
+#define _mm512_i32scatter_ps(addr, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)-1, \
+                                (__v16si)(__m512i)(index), \
+                                (__v16sf)(__m512)(v1), (int)(scale)); })
+
+#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)(mask), \
+                                (__v16si)(__m512i)(index), \
+                                (__v16sf)(__m512)(v1), (int)(scale)); })
+
+#define _mm512_i32scatter_epi32(addr, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)-1, \
+                                (__v16si)(__m512i)(index), \
+                                (__v16si)(__m512i)(v1), (int)(scale)); })
+
+#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)(mask), \
+                                (__v16si)(__m512i)(index), \
+                                (__v16si)(__m512i)(v1), (int)(scale)); })
+
+#define _mm512_i32scatter_pd(addr, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)-1, \
+                               (__v8si)(__m256i)(index), \
+                               (__v8df)(__m512d)(v1), (int)(scale)); })
+
+#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)(mask), \
+                               (__v8si)(__m256i)(index), \
+                               (__v8df)(__m512d)(v1), (int)(scale)); })
+
+#define _mm512_i32scatter_epi64(addr, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)-1, \
+                               (__v8si)(__m256i)(index), \
+                               (__v8di)(__m512i)(v1), (int)(scale)); })
+
+#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({\
+  __builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)(mask), \
+                               (__v8si)(__m256i)(index), \
+                               (__v8di)(__m512i)(v1), (int)(scale)); })
+
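+/* Illustrative usage sketch; 'old', 'm', 'idx' and 'base' are hypothetical
+ * locals.  The gather/scatter macros above take the merge source, mask,
+ * index vector, base address and element scale separately, e.g.
+ *   __m512d v = _mm512_mask_i32gather_pd(old, m, idx, base, 8);
+ * loads a double from base + idx[i]*8 for each lane whose mask bit is set
+ * and keeps the corresponding element of 'old' for masked-off lanes.
+ */
+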
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fmadd_round_ss(W, U, A, B, R) __extension__({\
+  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+                                        (__v4sf)(__m128)(B), \
+                                        (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                        (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) __C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) __extension__ ({\
+  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), \
+                                         (__v4sf)(__m128)(C), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
+          (__v4sf) __X,
+          (__v4sf) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) __extension__ ({\
+  (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+                                         (__v4sf)(__m128)(X), \
+                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __A,
+          -(__v4sf) __B,
+          (__v4sf) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fmsub_round_ss(W, U, A, B, R) __extension__ ({\
+  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+                                        -(__v4sf)(__m128)(B), \
+                                        (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                        (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,
+          (__v4sf) __B,
+          -(__v4sf) __C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) __extension__ ({\
+  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), \
+                                         -(__v4sf)(__m128)(C), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
+          (__v4sf) __X,
+          -(__v4sf) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) __extension__ ({\
+  (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+                                         (__v4sf)(__m128)(X), \
+                                         -(__v4sf)(__m128)(Y), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask (-(__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) __extension__ ({\
+  (__m128)__builtin_ia32_vfmaddss3_mask(-(__v4sf)(__m128)(A), \
+                                        (__v4sf)(__m128)(B), \
+                                        (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                        (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz (-(__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) __C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) __extension__ ({\
+  (__m128)__builtin_ia32_vfmaddss3_maskz(-(__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), \
+                                         (__v4sf)(__m128)(C), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 (-(__v4sf) __W,
+          (__v4sf) __X,
+          (__v4sf) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) __extension__({\
+  (__m128)__builtin_ia32_vfmaddss3_mask3(-(__v4sf)(__m128)(W), \
+                                         (__v4sf)(__m128)(X), \
+                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask (-(__v4sf) __A,
+          -(__v4sf) __B,
+          (__v4sf) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) __extension__ ({\
+  (__m128)__builtin_ia32_vfmaddss3_mask(-(__v4sf)(__m128)(A), \
+                                        -(__v4sf)(__m128)(B), \
+                                        (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                        (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz (-(__v4sf) __A,
+          (__v4sf) __B,
+          -(__v4sf) __C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) __extension__ ({\
+  (__m128)__builtin_ia32_vfmaddss3_maskz(-(__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), \
+                                         -(__v4sf)(__m128)(C), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 (-(__v4sf) __W,
+          (__v4sf) __X,
+          -(__v4sf) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) __extension__({\
+  (__m128)__builtin_ia32_vfmaddss3_mask3(-(__v4sf)(__m128)(W), \
+                                         (__v4sf)(__m128)(X), \
+                                         -(__v4sf)(__m128)(Y), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fmadd_round_sd(W, U, A, B, R) __extension__({\
+  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+                                         (__v2df)(__m128d)(B), \
+                                         (__v2df)(__m128d)(W), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) __extension__ ({\
+  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), \
+                                          (__v2df)(__m128d)(C), (__mmask8)(U), \
+                                          (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
+          (__v2df) __X,
+          (__v2df) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) __extension__ ({\
+  (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+                                          (__v2df)(__m128d)(X), \
+                                          (__v2df)(__m128d)(Y), (__mmask8)(U), \
+                                          (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,
+          -(__v2df) __B,
+          (__v2df) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fmsub_round_sd(W, U, A, B, R) __extension__ ({\
+  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+                                         -(__v2df)(__m128d)(B), \
+                                         (__v2df)(__m128d)(W), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,
+          (__v2df) __B,
+          -(__v2df) __C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) __extension__ ({\
+  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), \
+                                          -(__v2df)(__m128d)(C), \
+                                          (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
+          (__v2df) __X,
+          -(__v2df) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) __extension__ ({\
+  (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+                                          (__v2df)(__m128d)(X), \
+                                          -(__v2df)(__m128d)(Y), \
+                                          (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( -(__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) __extension__ ({\
+  (__m128d)__builtin_ia32_vfmaddsd3_mask(-(__v2df)(__m128d)(A), \
+                                         (__v2df)(__m128d)(B), \
+                                         (__v2df)(__m128d)(W), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( -(__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) __extension__ ({\
+  (__m128d)__builtin_ia32_vfmaddsd3_maskz(-(__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), \
+                                          (__v2df)(__m128d)(C), (__mmask8)(U), \
+                                          (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 (-(__v2df) __W,
+          (__v2df) __X,
+          (__v2df) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) __extension__({\
+  (__m128d)__builtin_ia32_vfmaddsd3_mask3(-(__v2df)(__m128d)(W), \
+                                          (__v2df)(__m128d)(X), \
+                                          (__v2df)(__m128d)(Y), (__mmask8)(U), \
+                                          (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( -(__v2df) __A,
+          -(__v2df) __B,
+          (__v2df) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) __extension__ ({\
+  (__m128d)__builtin_ia32_vfmaddsd3_mask(-(__v2df)(__m128d)(A), \
+                                         -(__v2df)(__m128d)(B), \
+                                         (__v2df)(__m128d)(W), (__mmask8)(U), \
+                                         (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( -(__v2df) __A,
+          (__v2df) __B,
+          -(__v2df) __C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) __extension__ ({\
+  (__m128d)__builtin_ia32_vfmaddsd3_maskz(-(__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), \
+                                          -(__v2df)(__m128d)(C), \
+                                          (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 (-(__v2df) __W,
+          (__v2df) __X,
+          -(__v2df) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) __extension__({\
+  (__m128d)__builtin_ia32_vfmaddsd3_mask3(-(__v2df)(__m128d)(W), \
+                                          (__v2df)(__m128d)(X), \
+                                          -(__v2df)(__m128d)(Y), \
+                                          (__mmask8)(U), (int)(R)); })
+
+#define _mm512_permutex_pd(X, C) __extension__ ({ \
+  (__m512d)__builtin_shufflevector((__v8df)(__m512d)(X), \
+                                   (__v8df)_mm512_undefined_pd(), \
+                                   0 + (((C) >> 0) & 0x3), \
+                                   0 + (((C) >> 2) & 0x3), \
+                                   0 + (((C) >> 4) & 0x3), \
+                                   0 + (((C) >> 6) & 0x3), \
+                                   4 + (((C) >> 0) & 0x3), \
+                                   4 + (((C) >> 2) & 0x3), \
+                                   4 + (((C) >> 4) & 0x3), \
+                                   4 + (((C) >> 6) & 0x3)); })
+
+#define _mm512_mask_permutex_pd(W, U, X, C) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_permutex_pd((X), (C)), \
+                                       (__v8df)(__m512d)(W)); })
+
+#define _mm512_maskz_permutex_pd(U, X, C) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_permutex_pd((X), (C)), \
+                                       (__v8df)_mm512_setzero_pd()); })
+
+#define _mm512_permutex_epi64(X, C) __extension__ ({ \
+  (__m512i)__builtin_shufflevector((__v8di)(__m512i)(X), \
+                                   (__v8di)_mm512_undefined_epi32(), \
+                                   0 + (((C) >> 0) & 0x3), \
+                                   0 + (((C) >> 2) & 0x3), \
+                                   0 + (((C) >> 4) & 0x3), \
+                                   0 + (((C) >> 6) & 0x3), \
+                                   4 + (((C) >> 0) & 0x3), \
+                                   4 + (((C) >> 2) & 0x3), \
+                                   4 + (((C) >> 4) & 0x3), \
+                                   4 + (((C) >> 6) & 0x3)); })
+
+#define _mm512_mask_permutex_epi64(W, U, X, C) __extension__ ({ \
+  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                      (__v8di)_mm512_permutex_epi64((X), (C)), \
+                                      (__v8di)(__m512i)(W)); })
+
+#define _mm512_maskz_permutex_epi64(U, X, C) __extension__ ({ \
+  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                      (__v8di)_mm512_permutex_epi64((X), (C)), \
+                                      (__v8di)_mm512_setzero_si512()); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_permutexvar_pd (__m512i __X, __m512d __Y)
+{
+  return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
+                 (__v8di) __X,
+                 (__v8df) _mm512_undefined_pd (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y)
+{
+  return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
+                 (__v8di) __X,
+                 (__v8df) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y)
+{
+  return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
+                 (__v8di) __X,
+                 (__v8df) _mm512_setzero_pd (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
+                 (__v8di) __X,
+                 (__v8di) _mm512_setzero_si512 (),
+                 __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
+                 (__v8di) __X,
+                 (__v8di) _mm512_undefined_epi32 (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X,
+             __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
+                 (__v8di) __X,
+                 (__v8di) __W,
+                 __M);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_permutexvar_ps (__m512i __X, __m512 __Y)
+{
+  return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
+                (__v16si) __X,
+                (__v16sf) _mm512_undefined_ps (),
+                (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_ps (__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y)
+{
+  return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
+                (__v16si) __X,
+                (__v16sf) __W,
+                (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y)
+{
+  return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
+                (__v16si) __X,
+                (__v16sf) _mm512_setzero_ps (),
+                (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
+                 (__v16si) __X,
+                 (__v16si) _mm512_setzero_si512 (),
+                 __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
+                 (__v16si) __X,
+                 (__v16si) _mm512_undefined_epi32 (),
+                 (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X,
+             __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
+                 (__v16si) __X,
+                 (__v16si) __W,
+                 __M);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kand (__mmask16 __A, __mmask16 __B)
+{
+  return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kandn (__mmask16 __A, __mmask16 __B)
+{
+  return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kor (__mmask16 __A, __mmask16 __B)
+{
+  return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_kortestc (__mmask16 __A, __mmask16 __B)
+{
+  return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_kortestz (__mmask16 __A, __mmask16 __B)
+{
+  return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kunpackb (__mmask16 __A, __mmask16 __B)
+{
+  return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kxnor (__mmask16 __A, __mmask16 __B)
+{
+  return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kxor (__mmask16 __A, __mmask16 __B)
+{
+  return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
+}
+
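+/* Illustrative usage sketch; 'a', 'b', 'c' and 'd' are hypothetical __m512i
+ * values.  The k-mask operations above combine the __mmask16 results of
+ * AVX-512 compares, e.g.
+ *   __mmask16 m = _mm512_kand(_mm512_cmpeq_epi32_mask(a, b),
+ *                             _mm512_cmplt_epi32_mask(c, d));
+ * keeps only the lanes where both comparisons hold.
+ */
+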
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_stream_si512 (__m512i * __P, __m512i __A)
+{
+  __builtin_nontemporal_store((__v8di)__A, (__v8di*)__P);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_stream_load_si512 (void *__P)
+{
+  return __builtin_ia32_movntdqa512 ((__v8di *)__P);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_stream_pd (double *__P, __m512d __A)
+{
+  __builtin_nontemporal_store((__v8df)__A, (__v8df*)__P);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_stream_ps (float *__P, __m512 __A)
+{
+  __builtin_nontemporal_store((__v16sf)__A, (__v16sf*)__P);
+}
+
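+/* Note: the _mm512_stream_* stores above are nontemporal; like the
+ * underlying VMOVNT instructions they expect a 64-byte aligned destination
+ * and bypass the cache hierarchy. */
+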
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
+                  (__v8df) __W,
+                  (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
+                  (__v8df)
+                  _mm512_setzero_pd (),
+                  (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
+                  (__v8di) __W,
+                  (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
+                  (__v8di)
+                  _mm512_setzero_si512 (),
+                  (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
+                 (__v16sf) __W,
+                 (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
+                 (__v16sf)
+                 _mm512_setzero_ps (),
+                 (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
+                  (__v16si) __W,
+                  (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
+                  (__v16si)
+                  _mm512_setzero_si512 (),
+                  (__mmask16) __U);
+}
+
+#define _mm_cmp_round_ss_mask(X, Y, P, R) __extension__ ({ \
+  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+                                      (__v4sf)(__m128)(Y), (int)(P), \
+                                      (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) __extension__ ({ \
+  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+                                      (__v4sf)(__m128)(Y), (int)(P), \
+                                      (__mmask8)(M), (int)(R)); })
+
+#define _mm_cmp_ss_mask(X, Y, P) __extension__ ({ \
+  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+                                      (__v4sf)(__m128)(Y), (int)(P), \
+                                      (__mmask8)-1, \
+                                      _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_cmp_ss_mask(M, X, Y, P) __extension__ ({ \
+  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+                                      (__v4sf)(__m128)(Y), (int)(P), \
+                                      (__mmask8)(M), \
+                                      _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_cmp_round_sd_mask(X, Y, P, R) __extension__ ({ \
+  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+                                      (__v2df)(__m128d)(Y), (int)(P), \
+                                      (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) __extension__ ({ \
+  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+                                      (__v2df)(__m128d)(Y), (int)(P), \
+                                      (__mmask8)(M), (int)(R)); })
+
+#define _mm_cmp_sd_mask(X, Y, P) __extension__ ({ \
+  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+                                      (__v2df)(__m128d)(Y), (int)(P), \
+                                      (__mmask8)-1, \
+                                      _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_cmp_sd_mask(M, X, Y, P) __extension__ ({ \
+  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+                                      (__v2df)(__m128d)(Y), (int)(P), \
+                                      (__mmask8)(M), \
+                                      _MM_FROUND_CUR_DIRECTION); })
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_movehdup_ps (__m512 __A)
+{
+  return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
+                         1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+                                             (__v16sf)_mm512_movehdup_ps(__A),
+                                             (__v16sf)__W);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A)
+{
+  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+                                             (__v16sf)_mm512_movehdup_ps(__A),
+                                             (__v16sf)_mm512_setzero_ps());
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_moveldup_ps (__m512 __A)
+{
+  return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
+                         0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+                                             (__v16sf)_mm512_moveldup_ps(__A),
+                                             (__v16sf)__W);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A)
+{
+  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+                                             (__v16sf)_mm512_moveldup_ps(__A),
+                                             (__v16sf)_mm512_setzero_ps());
+}
+
+#define _mm512_shuffle_epi32(A, I) __extension__ ({ \
+  (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \
+                                   (__v16si)_mm512_undefined_epi32(), \
+                                   0  + (((I) >> 0) & 0x3), \
+                                   0  + (((I) >> 2) & 0x3), \
+                                   0  + (((I) >> 4) & 0x3), \
+                                   0  + (((I) >> 6) & 0x3), \
+                                   4  + (((I) >> 0) & 0x3), \
+                                   4  + (((I) >> 2) & 0x3), \
+                                   4  + (((I) >> 4) & 0x3), \
+                                   4  + (((I) >> 6) & 0x3), \
+                                   8  + (((I) >> 0) & 0x3), \
+                                   8  + (((I) >> 2) & 0x3), \
+                                   8  + (((I) >> 4) & 0x3), \
+                                   8  + (((I) >> 6) & 0x3), \
+                                   12 + (((I) >> 0) & 0x3), \
+                                   12 + (((I) >> 2) & 0x3), \
+                                   12 + (((I) >> 4) & 0x3), \
+                                   12 + (((I) >> 6) & 0x3)); })
+
+#define _mm512_mask_shuffle_epi32(W, U, A, I) __extension__ ({ \
+  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                      (__v16si)_mm512_shuffle_epi32((A), (I)), \
+                                      (__v16si)(__m512i)(W)); })
+
+#define _mm512_maskz_shuffle_epi32(U, A, I) __extension__ ({ \
+  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                      (__v16si)_mm512_shuffle_epi32((A), (I)), \
+                                      (__v16si)_mm512_setzero_si512()); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
+                (__v8df) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
+                (__v8df) _mm512_setzero_pd (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
+                (__v8di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expand_epi64 (__mmask8 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
+                (__v8di) _mm512_setzero_si512 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P)
+{
+  return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
+              (__v8df) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
+{
+  return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
+              (__v8df) _mm512_setzero_pd(),
+              (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
+              (__v8di) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
+              (__v8di) _mm512_setzero_si512(),
+              (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P)
+{
+  return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
+                   (__v16sf) __W,
+                   (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P)
+{
+  return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
+                   (__v16sf) _mm512_setzero_ps(),
+                   (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
+              (__v16si) __W,
+              (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
+              (__v16si) _mm512_setzero_si512(),
+              (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
+               (__v16sf) __W,
+               (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
+               (__v16sf) _mm512_setzero_ps(),
+               (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
+                (__v16si) __W,
+                (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
+                (__v16si) _mm512_setzero_si512(),
+                (__mmask16) __U);
+}
+
+#define _mm512_cvt_roundps_pd(A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+                                           (__v8df)_mm512_undefined_pd(), \
+                                           (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundps_pd(W, U, A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+                                           (__v8df)(__m512d)(W), \
+                                           (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundps_pd(U, A, R) __extension__ ({ \
+  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtps_pd (__m256 __A)
+{
+  return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
+                (__v8df)
+                _mm512_undefined_pd (),
+                (__mmask8) -1,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A)
+{
+  return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
+                (__v8df) __W,
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A)
+{
+  return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
+                (__v8df)
+                _mm512_setzero_pd (),
+                (__mmask8) __U,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
+              (__v8df) __A,
+              (__v8df) __W);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A)
+{
+  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
+              (__v8df) __A,
+              (__v8df) _mm512_setzero_pd ());
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
+             (__v16sf) __A,
+             (__v16sf) __W);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A)
+{
+  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
+             (__v16sf) __A,
+             (__v16sf) _mm512_setzero_ps ());
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A)
+{
+  __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A,
+            (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A)
+{
+  __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A,
+            (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A)
+{
+  __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A,
+            (__mmask16) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A)
+{
+  __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A,
+            (__mmask16) __U);
+}
+
+#define _mm_cvt_roundsd_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v4sf)_mm_undefined_ps(), \
+                                             (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v4sf)(__m128)(W), \
+                                             (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v4sf)_mm_setzero_ps(), \
+                                             (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
+{
+  return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A),
+                                             (__v2df)(__B),
+                                             (__v4sf)(__W), 
+                                             (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
+{
+  return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A),
+                                             (__v2df)(__B),
+                                             (__v4sf)_mm_setzero_ps(), 
+                                             (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtss_i32 _mm_cvtss_si32
+#define _mm_cvtss_i64 _mm_cvtss_si64
+#define _mm_cvtsd_i32 _mm_cvtsd_si32
+#define _mm_cvtsd_i64 _mm_cvtsd_si64
+#define _mm_cvti32_sd _mm_cvtsi32_sd
+#define _mm_cvti64_sd _mm_cvtsi64_sd
+#define _mm_cvti32_ss _mm_cvtsi32_ss
+#define _mm_cvti64_ss _mm_cvtsi64_ss
+
+#define _mm_cvt_roundi64_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
+                                     (int)(R)); })
+
+#define _mm_cvt_roundsi64_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
+                                     (int)(R)); })
+
+#define _mm_cvt_roundsi32_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
+
+#define _mm_cvt_roundi32_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
+
+#define _mm_cvt_roundsi64_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
+                                    (int)(R)); })
+
+#define _mm_cvt_roundi64_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
+                                    (int)(R)); })
+
+#define _mm_cvt_roundss_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v2df)_mm_undefined_pd(), \
+                                              (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v2df)(__m128d)(W), \
+                                              (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_cvt_roundss_sd(U, A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v2df)_mm_setzero_pd(), \
+                                              (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
+{
+  return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A),
+                                              (__v4sf)(__B),
+                                              (__v2df)(__W),
+                                              (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); 
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B)
+{
+  return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A),
+                                              (__v4sf)(__B),
+                                              (__v2df)_mm_setzero_pd(), 
+                                              (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); 
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtu32_sd (__m128d __A, unsigned __B)
+{
+  return (__m128d) __builtin_ia32_cvtusi2sd32 ((__v2df) __A, __B);
+}
+
+#define _mm_cvt_roundu64_sd(A, B, R) __extension__ ({ \
+  (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
+                                      (unsigned long long)(B), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
+{
+  return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B,
+                 _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundu32_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
+                                     (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtu32_ss (__m128 __A, unsigned __B)
+{
+  return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundu64_ss(A, B, R) __extension__ ({ \
+  (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
+                                     (unsigned long long)(B), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
+{
+  return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B,
+                _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastd512_gpr_mask (__A, (__v16si) __O,
+                 __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A, (__v8di) __O,
+                 __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set_epi32 (int __A, int __B, int __C, int __D,
+     int __E, int __F, int __G, int __H,
+     int __I, int __J, int __K, int __L,
+     int __M, int __N, int __O, int __P)
+{
+  return __extension__ (__m512i)(__v16si)
+  { __P, __O, __N, __M, __L, __K, __J, __I,
+    __H, __G, __F, __E, __D, __C, __B, __A };
+}
+
+#define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7,           \
+       e8,e9,e10,e11,e12,e13,e14,e15)          \
+  _mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \
+                   (e5),(e4),(e3),(e2),(e1),(e0))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_set_epi64 (long long __A, long long __B, long long __C,
+     long long __D, long long __E, long long __F,
+     long long __G, long long __H)
+{
+  return __extension__ (__m512i) (__v8di)
+  { __H, __G, __F, __E, __D, __C, __B, __A };
+}
+
+#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7)           \
+  _mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_set_pd (double __A, double __B, double __C, double __D,
+        double __E, double __F, double __G, double __H)
+{
+  return __extension__ (__m512d)
+  { __H, __G, __F, __E, __D, __C, __B, __A };
+}
+
+#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7)              \
+  _mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_set_ps (float __A, float __B, float __C, float __D,
+        float __E, float __F, float __G, float __H,
+        float __I, float __J, float __K, float __L,
+        float __M, float __N, float __O, float __P)
+{
+  return __extension__ (__m512)
+  { __P, __O, __N, __M, __L, __K, __J, __I,
+    __H, __G, __F, __E, __D, __C, __B, __A };
+}
+
+#define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \
+  _mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \
+                (e4),(e3),(e2),(e1),(e0))
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_abs_ps(__m512 A)
+{
+  return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF), (__m512i)A);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_abs_ps(__m512 W, __mmask16 K, __m512 A)
+{
+  return (__m512)_mm512_mask_and_epi32((__m512i)W, K, _mm512_set1_epi32(0x7FFFFFFF), (__m512i)A);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_abs_pd(__m512d A)
+{
+  return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)A);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_abs_pd(__m512d W, __mmask8 K, __m512d A)
+{
+  return (__m512d)_mm512_mask_and_epi64((__v8di)W, K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)A);
+}
+
 #undef __DEFAULT_FN_ATTRS
 
 #endif // __AVX512FINTRIN_H
diff --git a/renderscript/clang-include/avx512ifmaintrin.h b/renderscript/clang-include/avx512ifmaintrin.h
new file mode 100644
index 0000000..5defbae
--- /dev/null
+++ b/renderscript/clang-include/avx512ifmaintrin.h
@@ -0,0 +1,92 @@
+/*===------------- avx512ifmaintrin.h - IFMA intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512ifmaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __IFMAINTRIN_H
+#define __IFMAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma")))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_madd52hi_epu64 (__m512i __X, __m512i __Y, __m512i __Z)
+{
+  return (__m512i) __builtin_ia32_vpmadd52huq512_mask ((__v8di) __X,
+                   (__v8di) __Y,
+                   (__v8di) __Z,
+                   (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_madd52hi_epu64 (__m512i __W, __mmask8 __M, __m512i __X,
+          __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_vpmadd52huq512_mask ((__v8di) __W,
+                   (__v8di) __X,
+                   (__v8di) __Y,
+                   (__mmask8) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_madd52hi_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z)
+{
+  return (__m512i) __builtin_ia32_vpmadd52huq512_maskz ((__v8di) __X,
+              (__v8di) __Y,
+              (__v8di) __Z,
+              (__mmask8) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_madd52lo_epu64 (__m512i __X, __m512i __Y, __m512i __Z)
+{
+  return (__m512i) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __X,
+                   (__v8di) __Y,
+                   (__v8di) __Z,
+                   (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_madd52lo_epu64 (__m512i __W, __mmask8 __M, __m512i __X,
+          __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __W,
+                   (__v8di) __X,
+                   (__v8di) __Y,
+                   (__mmask8) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_madd52lo_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z)
+{
+  return (__m512i) __builtin_ia32_vpmadd52luq512_maskz ((__v8di) __X,
+              (__v8di) __Y,
+              (__v8di) __Z,
+              (__mmask8) __M);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/renderscript/clang-include/avx512ifmavlintrin.h b/renderscript/clang-include/avx512ifmavlintrin.h
new file mode 100644
index 0000000..131ee5c
--- /dev/null
+++ b/renderscript/clang-include/avx512ifmavlintrin.h
@@ -0,0 +1,149 @@
+/*===------------- avx512ifmavlintrin.h - IFMA intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512ifmavlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __IFMAVLINTRIN_H
+#define __IFMAVLINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl")))
+
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_madd52hi_epu64 (__m128i __X, __m128i __Y, __m128i __Z)
+{
+  return (__m128i) __builtin_ia32_vpmadd52huq128_mask ((__v2di) __X,
+                   (__v2di) __Y,
+                   (__v2di) __Z,
+                   (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_madd52hi_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_vpmadd52huq128_mask ((__v2di) __W,
+                   (__v2di) __X,
+                   (__v2di) __Y,
+                   (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_madd52hi_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z)
+{
+  return (__m128i) __builtin_ia32_vpmadd52huq128_maskz ((__v2di) __X,
+              (__v2di) __Y,
+              (__v2di) __Z,
+              (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_madd52hi_epu64 (__m256i __X, __m256i __Y, __m256i __Z)
+{
+  return (__m256i) __builtin_ia32_vpmadd52huq256_mask ((__v4di) __X,
+                   (__v4di) __Y,
+                   (__v4di) __Z,
+                   (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X,
+          __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_vpmadd52huq256_mask ((__v4di) __W,
+                   (__v4di) __X,
+                   (__v4di) __Y,
+                   (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_madd52hi_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z)
+{
+  return (__m256i) __builtin_ia32_vpmadd52huq256_maskz ((__v4di) __X,
+              (__v4di) __Y,
+              (__v4di) __Z,
+              (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_madd52lo_epu64 (__m128i __X, __m128i __Y, __m128i __Z)
+{
+  return (__m128i) __builtin_ia32_vpmadd52luq128_mask ((__v2di) __X,
+                   (__v2di) __Y,
+                   (__v2di) __Z,
+                   (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_madd52lo_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_vpmadd52luq128_mask ((__v2di) __W,
+                   (__v2di) __X,
+                   (__v2di) __Y,
+                   (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_madd52lo_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z)
+{
+  return (__m128i) __builtin_ia32_vpmadd52luq128_maskz ((__v2di) __X,
+              (__v2di) __Y,
+              (__v2di) __Z,
+              (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_madd52lo_epu64 (__m256i __X, __m256i __Y, __m256i __Z)
+{
+  return (__m256i) __builtin_ia32_vpmadd52luq256_mask ((__v4di) __X,
+                   (__v4di) __Y,
+                   (__v4di) __Z,
+                   (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X,
+          __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_vpmadd52luq256_mask ((__v4di) __W,
+                   (__v4di) __X,
+                   (__v4di) __Y,
+                   (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_madd52lo_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z)
+{
+  return (__m256i) __builtin_ia32_vpmadd52luq256_maskz ((__v4di) __X,
+              (__v4di) __Y,
+              (__v4di) __Z,
+              (__mmask8) __M);
+}
+
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/renderscript/clang-include/avx512pfintrin.h b/renderscript/clang-include/avx512pfintrin.h
new file mode 100644
index 0000000..c7fa3cf
--- /dev/null
+++ b/renderscript/clang-include/avx512pfintrin.h
@@ -0,0 +1,111 @@
+/*===------------- avx512pfintrin.h - PF intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512pfintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512PFINTRIN_H
+#define __AVX512PFINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512pf")))
+
+#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) __extension__ ({\
+  __builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
+                             (long long const *)(addr), (int)(scale), \
+                             (int)(hint)); })
+              
+#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) __extension__ ({\
+  __builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \
+                             (long long const *)(addr), (int)(scale), \
+                             (int)(hint)); })
+
+#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) ({\
+  __builtin_ia32_gatherpfdps((__mmask16)(mask), \
+                             (__v16si)(__m512i)(index), (int const *)(addr), \
+                             (int)(scale), (int)(hint)); })
+
+#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) ({\
+  __builtin_ia32_gatherpfdps((__mmask16) -1, \
+                             (__v16si)(__m512i)(index), (int const *)(addr), \
+                             (int)(scale), (int)(hint)); })
+
+#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) __extension__ ({\
+  __builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
+                             (long long const *)(addr), (int)(scale), \
+                             (int)(hint)); })
+
+#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) __extension__ ({\
+  __builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \
+                             (long long const *)(addr), (int)(scale), \
+                             (int)(hint)); })
+              
+#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) ({\
+  __builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
+                             (int const *)(addr), (int)(scale), (int)(hint)); })
+
+#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) ({\
+  __builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \
+                             (int const *)(addr), (int)(scale), (int)(hint)); })
+
+#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) __extension__ ({\
+  __builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \
+                              (long long *)(addr), (int)(scale), \
+                              (int)(hint)); })
+
+#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) __extension__ ({\
+  __builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
+                              (long long *)(addr), (int)(scale), \
+                              (int)(hint)); })
+
+#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) __extension__ ({\
+  __builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \
+                              (int *)(addr), (int)(scale), (int)(hint)); })
+
+#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) __extension__ ({\
+  __builtin_ia32_scatterpfdps((__mmask16)(mask), \
+                              (__v16si)(__m512i)(index), (int *)(addr), \
+                              (int)(scale), (int)(hint)); })
+
+#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) __extension__ ({\
+  __builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \
+                              (long long *)(addr), (int)(scale), \
+                              (int)(hint)); })
+
+#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) __extension__ ({\
+  __builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
+                              (long long *)(addr), (int)(scale), \
+                              (int)(hint)); })
+
+#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) __extension__ ({\
+  __builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \
+                              (int *)(addr), (int)(scale), (int)(hint)); })
+
+#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) __extension__ ({\
+  __builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
+                              (int *)(addr), (int)(scale), (int)(hint)); })
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/renderscript/clang-include/avx512vbmiintrin.h b/renderscript/clang-include/avx512vbmiintrin.h
new file mode 100644
index 0000000..837238e
--- /dev/null
+++ b/renderscript/clang-include/avx512vbmiintrin.h
@@ -0,0 +1,137 @@
+/*===------------- avx512vbmiintrin.h - VBMI intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vbmiintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __VBMIINTRIN_H
+#define __VBMIINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi")))
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_epi8 (__m512i __A, __m512i __I,
+         __mmask64 __U, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermi2varqi512_mask ((__v64qi) __A,
+              (__v64qi) __I
+              /* idx */ ,
+              (__v64qi) __B,
+              (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi8 (__m512i __A, __m512i __I, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermt2varqi512_mask ((__v64qi) __I
+              /* idx */ ,
+              (__v64qi) __A,
+              (__v64qi) __B,
+              (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_epi8 (__m512i __A, __mmask64 __U,
+        __m512i __I, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermt2varqi512_mask ((__v64qi) __I
+              /* idx */ ,
+              (__v64qi) __A,
+              (__v64qi) __B,
+              (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_epi8 (__mmask64 __U, __m512i __A,
+         __m512i __I, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermt2varqi512_maskz ((__v64qi) __I
+               /* idx */ ,
+               (__v64qi) __A,
+               (__v64qi) __B,
+               (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutexvar_epi8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B,
+                 (__v64qi) __A,
+                 (__v64qi) _mm512_undefined_epi32 (),
+                 (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_epi8 (__mmask64 __M, __m512i __A,
+        __m512i __B)
+{
+  return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B,
+                 (__v64qi) __A,
+                 (__v64qi) _mm512_setzero_si512(),
+                 (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
+             __m512i __B)
+{
+  return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B,
+                 (__v64qi) __A,
+                 (__v64qi) __W,
+                 (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_multishift_epi64_epi8 (__m512i __W, __mmask64 __M, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
+                (__v64qi) __Y,
+                (__v64qi) __W,
+                (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_multishift_epi64_epi8 (__mmask64 __M, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
+                (__v64qi) __Y,
+                (__v64qi) _mm512_setzero_si512 (),
+                (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_multishift_epi64_epi8 (__m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
+                (__v64qi) __Y,
+                (__v64qi) _mm512_undefined_epi32 (),
+                (__mmask64) -1);
+}
+
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/renderscript/clang-include/avx512vbmivlintrin.h b/renderscript/clang-include/avx512vbmivlintrin.h
new file mode 100644
index 0000000..105c6d1
--- /dev/null
+++ b/renderscript/clang-include/avx512vbmivlintrin.h
@@ -0,0 +1,247 @@
+/*===------------- avx512vbmivlintrin.h - VBMI intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vbmivlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __VBMIVLINTRIN_H
+#define __VBMIVLINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi,avx512vl")))
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi8 (__m128i __A, __m128i __I, __mmask16 __U,
+            __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermi2varqi128_mask ((__v16qi) __A,
+              (__v16qi) __I
+              /* idx */ ,
+              (__v16qi) __B,
+              (__mmask16)
+              __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi8 (__m256i __A, __m256i __I,
+         __mmask32 __U, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermi2varqi256_mask ((__v32qi) __A,
+              (__v32qi) __I
+              /* idx */ ,
+              (__v32qi) __B,
+              (__mmask32)
+              __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi8 (__m128i __A, __m128i __I, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I
+              /* idx */ ,
+              (__v16qi) __A,
+              (__v16qi) __B,
+              (__mmask16) -
+              1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi8 (__m128i __A, __mmask16 __U, __m128i __I,
+           __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I
+              /* idx */ ,
+              (__v16qi) __A,
+              (__v16qi) __B,
+              (__mmask16)
+              __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi8 (__mmask16 __U, __m128i __A, __m128i __I,
+            __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2varqi128_maskz ((__v16qi) __I
+               /* idx */ ,
+               (__v16qi) __A,
+               (__v16qi) __B,
+               (__mmask16)
+               __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi8 (__m256i __A, __m256i __I, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I
+              /* idx */ ,
+              (__v32qi) __A,
+              (__v32qi) __B,
+              (__mmask32) -
+              1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi8 (__m256i __A, __mmask32 __U,
+        __m256i __I, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I
+              /* idx */ ,
+              (__v32qi) __A,
+              (__v32qi) __B,
+              (__mmask32)
+              __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi8 (__mmask32 __U, __m256i __A,
+         __m256i __I, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2varqi256_maskz ((__v32qi) __I
+               /* idx */ ,
+               (__v32qi) __A,
+               (__v32qi) __B,
+               (__mmask32)
+               __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutexvar_epi8 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
+                 (__v16qi) __A,
+                 (__v16qi) _mm_undefined_si128 (),
+                 (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutexvar_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
+                 (__v16qi) __A,
+                 (__v16qi) _mm_setzero_si128 (),
+                 (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutexvar_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
+          __m128i __B)
+{
+  return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
+                 (__v16qi) __A,
+                 (__v16qi) __W,
+                 (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutexvar_epi8 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
+                 (__v32qi) __A,
+                 (__v32qi) _mm256_undefined_si256 (),
+                 (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_epi8 (__mmask32 __M, __m256i __A,
+        __m256i __B)
+{
+  return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
+                 (__v32qi) __A,
+                 (__v32qi) _mm256_setzero_si256 (),
+                 (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
+             __m256i __B)
+{
+  return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
+                 (__v32qi) __A,
+                 (__v32qi) __W,
+                 (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_multishift_epi64_epi8 (__m128i __W, __mmask16 __M, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
+                (__v16qi) __Y,
+                (__v16qi) __W,
+                (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_multishift_epi64_epi8 (__mmask16 __M, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
+                (__v16qi) __Y,
+                (__v16qi)
+                _mm_setzero_si128 (),
+                (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_multishift_epi64_epi8 (__m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
+                (__v16qi) __Y,
+                (__v16qi)
+                _mm_undefined_si128 (),
+                (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_multishift_epi64_epi8 (__m256i __W, __mmask32 __M, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
+                (__v32qi) __Y,
+                (__v32qi) __W,
+                (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_multishift_epi64_epi8 (__mmask32 __M, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
+                (__v32qi) __Y,
+                (__v32qi)
+                _mm256_setzero_si256 (),
+                (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_multishift_epi64_epi8 (__m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
+                (__v32qi) __Y,
+                (__v32qi)
+                _mm256_undefined_si256 (),
+                (__mmask32) -1);
+}
+
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/renderscript/clang-include/avx512vlbwintrin.h b/renderscript/clang-include/avx512vlbwintrin.h
index b4542d6..990e992 100644
--- a/renderscript/clang-include/avx512vlbwintrin.h
+++ b/renderscript/clang-include/avx512vlbwintrin.h
@@ -31,6 +31,11 @@
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw")))
 
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm_setzero_hi(void) {
+    return (__m128i)(__v8hi){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
 /* Integer compare */
 
 static __inline__ __mmask16 __DEFAULT_FN_ATTRS
@@ -781,33 +786,33 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W)
 {
-  return (__m128i) __builtin_ia32_blendmb_128_mask ((__v16qi) __A,
-               (__v16qi) __W,
-               (__mmask16) __U);
+  return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
+              (__v16qi) __W,
+              (__v16qi) __A);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W)
 {
-  return (__m256i) __builtin_ia32_blendmb_256_mask ((__v32qi) __A,
+  return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
                (__v32qi) __W,
-               (__mmask32) __U);
+               (__v32qi) __A);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W)
 {
-  return (__m128i) __builtin_ia32_blendmw_128_mask ((__v8hi) __A,
+  return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
                (__v8hi) __W,
-               (__mmask8) __U);
+               (__v8hi) __A);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
 {
-  return (__m256i) __builtin_ia32_blendmw_256_mask ((__v16hi) __A,
+  return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
                (__v16hi) __W,
-               (__mmask16) __U);
+               (__v16hi) __A);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -1994,6 +1999,25 @@
                __M);
 }
 
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovwb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
+}
+
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovuswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
+}
+
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm256_cvtepi16_epi8 (__m256i __A) {
   return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
@@ -2015,6 +2039,23 @@
                __M);
 }
 
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
+{
+  __builtin_ia32_pmovwb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
+{
+  __builtin_ia32_pmovswb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
+{
+  __builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M);
+}
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_mask_mulhrs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
   return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
@@ -2116,220 +2157,1249 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_unpackhi_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
-      __m128i __B) {
-  return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A,
-               (__v16qi) __B,
-               (__v16qi) __W,
-               (__mmask16) __U);
+_mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+                                           (__v16qi)_mm_unpackhi_epi8(__A, __B),
+                                           (__v16qi)__W);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_unpackhi_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
-  return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A,
-               (__v16qi) __B,
-               (__v16qi) _mm_setzero_si128(),
-               (__mmask16) __U);
+_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
+  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+                                           (__v16qi)_mm_unpackhi_epi8(__A, __B),
+                                           (__v16qi)_mm_setzero_si128());
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_unpackhi_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
-         __m256i __B) {
-  return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A,
-               (__v32qi) __B,
-               (__v32qi) __W,
-               (__mmask32) __U);
+_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+                                        (__v32qi)_mm256_unpackhi_epi8(__A, __B),
+                                        (__v32qi)__W);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_unpackhi_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
-  return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A,
-               (__v32qi) __B,
-               (__v32qi) _mm256_setzero_si256(),
-               (__mmask32) __U);
+_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+                                        (__v32qi)_mm256_unpackhi_epi8(__A, __B),
+                                        (__v32qi)_mm256_setzero_si256());
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_unpackhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
-       __m128i __B) {
-  return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A,
-               (__v8hi) __B,
-               (__v8hi) __W,
-               (__mmask8) __U);
+_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+                                           (__v8hi)_mm_unpackhi_epi16(__A, __B),
+                                           (__v8hi)__W);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_unpackhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A,
-               (__v8hi) __B,
-               (__v8hi) _mm_setzero_si128(),
-               (__mmask8) __U);
+_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+                                           (__v8hi)_mm_unpackhi_epi16(__A, __B),
+                                           (__v8hi) _mm_setzero_si128());
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_unpackhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
-          __m256i __B) {
-  return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A,
-               (__v16hi) __B,
-               (__v16hi) __W,
-               (__mmask16) __U);
+_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+                                       (__v16hi)_mm256_unpackhi_epi16(__A, __B),
+                                       (__v16hi)__W);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_unpackhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A,
-               (__v16hi) __B,
-               (__v16hi) _mm256_setzero_si256(),
-               (__mmask16) __U);
+_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+                                       (__v16hi)_mm256_unpackhi_epi16(__A, __B),
+                                       (__v16hi)_mm256_setzero_si256());
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_unpacklo_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
-      __m128i __B) {
-  return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A,
-               (__v16qi) __B,
-               (__v16qi) __W,
-               (__mmask16) __U);
+_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+                                           (__v16qi)_mm_unpacklo_epi8(__A, __B),
+                                           (__v16qi)__W);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_unpacklo_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
-  return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A,
-               (__v16qi) __B,
-               (__v16qi) _mm_setzero_si128(),
-               (__mmask16) __U);
+_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
+  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+                                           (__v16qi)_mm_unpacklo_epi8(__A, __B),
+                                           (__v16qi)_mm_setzero_si128());
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_unpacklo_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
-         __m256i __B) {
-  return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A,
-               (__v32qi) __B,
-               (__v32qi) __W,
-               (__mmask32) __U);
+_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+                                        (__v32qi)_mm256_unpacklo_epi8(__A, __B),
+                                        (__v32qi)__W);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_unpacklo_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
-  return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A,
-               (__v32qi) __B,
-               (__v32qi) _mm256_setzero_si256(),
-               (__mmask32) __U);
+_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+                                        (__v32qi)_mm256_unpacklo_epi8(__A, __B),
+                                        (__v32qi)_mm256_setzero_si256());
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_unpacklo_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
-       __m128i __B) {
-  return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A,
-               (__v8hi) __B,
-               (__v8hi) __W,
-               (__mmask8) __U);
+_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+                                           (__v8hi)_mm_unpacklo_epi16(__A, __B),
+                                           (__v8hi)__W);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_unpacklo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A,
-               (__v8hi) __B,
-               (__v8hi) _mm_setzero_si128(),
-               (__mmask8) __U);
+_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+                                           (__v8hi)_mm_unpacklo_epi16(__A, __B),
+                                           (__v8hi)_mm_setzero_si128());
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_unpacklo_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
-          __m256i __B) {
-  return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A,
-               (__v16hi) __B,
-               (__v16hi) __W,
-               (__mmask16) __U);
+_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+                                       (__v16hi)_mm256_unpacklo_epi16(__A, __B),
+                                       (__v16hi)__W);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_unpacklo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A,
-               (__v16hi) __B,
-               (__v16hi) _mm256_setzero_si256(),
-               (__mmask16) __U);
+_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+                                       (__v16hi)_mm256_unpacklo_epi16(__A, __B),
+                                       (__v16hi)_mm256_setzero_si256());
 }
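+
+/* Masked widening conversions from packed 8-bit to packed 16-bit elements.
+ * As with the other masked intrinsics here, the _mask forms take unselected
+ * lanes from __W and the _maskz forms zero them.  A minimal usage sketch
+ * (hypothetical values, for illustration only):
+ *
+ *   __m128i v = _mm_set1_epi8(-1);
+ *   __m128i w = _mm_maskz_cvtepi8_epi16(0x0F, v); // lanes 0-3 = -1, 4-7 = 0
+ */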
 
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi8_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbw128_mask ((__v16qi) __A,
+                (__v8hi) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi8_epi16 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbw128_mask ((__v16qi) __A,
+                (__v8hi)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi8_epi16 (__m256i __W, __mmask16 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbw256_mask ((__v16qi) __A,
+                (__v16hi) __W,
+                (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi8_epi16 (__mmask16 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbw256_mask ((__v16qi) __A,
+                (__v16hi)
+                _mm256_setzero_si256 (),
+                (__mmask16) __U);
+}
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu8_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbw128_mask ((__v16qi) __A,
+                (__v8hi) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu8_epi16 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbw128_mask ((__v16qi) __A,
+                (__v8hi)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu8_epi16 (__m256i __W, __mmask16 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbw256_mask ((__v16qi) __A,
+                (__v16hi) __W,
+                (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbw256_mask ((__v16qi) __A,
+                (__v16hi)
+                _mm256_setzero_si256 (),
+                (__mmask16) __U);
+}
+
+
 #define _mm_cmp_epi8_mask(a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
-                                         (__v16qi)(__m128i)(b), \
-                                         (p), (__mmask16)-1); })
+                                         (__v16qi)(__m128i)(b), (int)(p), \
+                                         (__mmask16)-1); })
 
 #define _mm_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
-                                         (__v16qi)(__m128i)(b), \
-                                         (p), (__mmask16)(m)); })
+                                         (__v16qi)(__m128i)(b), (int)(p), \
+                                         (__mmask16)(m)); })
 
 #define _mm_cmp_epu8_mask(a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
-                                          (__v16qi)(__m128i)(b), \
-                                          (p), (__mmask16)-1); })
+                                          (__v16qi)(__m128i)(b), (int)(p), \
+                                          (__mmask16)-1); })
 
 #define _mm_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
-                                          (__v16qi)(__m128i)(b), \
-                                          (p), (__mmask16)(m)); })
+                                          (__v16qi)(__m128i)(b), (int)(p), \
+                                          (__mmask16)(m)); })
 
 #define _mm256_cmp_epi8_mask(a, b, p) __extension__ ({ \
   (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
-                                         (__v32qi)(__m256i)(b), \
-                                         (p), (__mmask32)-1); })
+                                         (__v32qi)(__m256i)(b), (int)(p), \
+                                         (__mmask32)-1); })
 
 #define _mm256_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
   (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
-                                         (__v32qi)(__m256i)(b), \
-                                         (p), (__mmask32)(m)); })
+                                         (__v32qi)(__m256i)(b), (int)(p), \
+                                         (__mmask32)(m)); })
 
 #define _mm256_cmp_epu8_mask(a, b, p) __extension__ ({ \
   (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
-                                          (__v32qi)(__m256i)(b), \
-                                          (p), (__mmask32)-1); })
+                                          (__v32qi)(__m256i)(b), (int)(p), \
+                                          (__mmask32)-1); })
 
 #define _mm256_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
   (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
-                                          (__v32qi)(__m256i)(b), \
-                                          (p), (__mmask32)(m)); })
+                                          (__v32qi)(__m256i)(b), (int)(p), \
+                                          (__mmask32)(m)); })
 
 #define _mm_cmp_epi16_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
-                                        (__v8hi)(__m128i)(b), \
-                                        (p), (__mmask8)-1); })
+                                        (__v8hi)(__m128i)(b), (int)(p), \
+                                        (__mmask8)-1); })
 
 #define _mm_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
-                                        (__v8hi)(__m128i)(b), \
-                                        (p), (__mmask8)(m)); })
+                                        (__v8hi)(__m128i)(b), (int)(p), \
+                                        (__mmask8)(m)); })
 
 #define _mm_cmp_epu16_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
-                                         (__v8hi)(__m128i)(b), \
-                                         (p), (__mmask8)-1); })
+                                         (__v8hi)(__m128i)(b), (int)(p), \
+                                         (__mmask8)-1); })
 
 #define _mm_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
-                                         (__v8hi)(__m128i)(b), \
-                                         (p), (__mmask8)(m)); })
+                                         (__v8hi)(__m128i)(b), (int)(p), \
+                                         (__mmask8)(m)); })
 
 #define _mm256_cmp_epi16_mask(a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
-                                         (__v16hi)(__m256i)(b), \
-                                         (p), (__mmask16)-1); })
+                                         (__v16hi)(__m256i)(b), (int)(p), \
+                                         (__mmask16)-1); })
 
 #define _mm256_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
-                                         (__v16hi)(__m256i)(b), \
-                                         (p), (__mmask16)(m)); })
+                                         (__v16hi)(__m256i)(b), (int)(p), \
+                                         (__mmask16)(m)); })
 
 #define _mm256_cmp_epu16_mask(a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
-                                          (__v16hi)(__m256i)(b), \
-                                          (p), (__mmask16)-1); })
+                                          (__v16hi)(__m256i)(b), (int)(p), \
+                                          (__mmask16)-1); })
 
 #define _mm256_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
   (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
-                                          (__v16hi)(__m256i)(b), \
-                                          (p), (__mmask16)(m)); })
+                                          (__v16hi)(__m256i)(b), (int)(p), \
+                                          (__mmask16)(m)); })
+
+#define _mm_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                      (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
+                                      (__v8hi)(__m128i)(W)); })
+
+#define _mm_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                      (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
+                                      (__v8hi)_mm_setzero_hi()); })
+
+#define _mm256_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                      (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
+                                      (__v16hi)(__m256i)(W)); })
+
+#define _mm256_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                      (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
+                                      (__v16hi)_mm256_setzero_si256()); })
+
+#define _mm_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                      (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
+                                      (__v8hi)(__m128i)(W)); })
+
+#define _mm_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                      (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
+                                      (__v8hi)_mm_setzero_hi()); })
+
+#define _mm256_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                      (__v16hi)_mm256_shufflelo_epi16((A), \
+                                                                      (imm)), \
+                                      (__v16hi)(__m256i)(W)); })
+
+#define _mm256_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                      (__v16hi)_mm256_shufflelo_epi16((A), \
+                                                                      (imm)), \
+                                      (__v16hi)_mm256_setzero_si256()); })
+
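+/* Per-element variable shifts on 16-bit lanes (VPSLLVW/VPSRLVW/VPSRAVW).
+ * Each element of __A is shifted by the count in the corresponding element
+ * of __B; logical shifts by 16 or more produce zero.  Sketch (illustrative
+ * values only):
+ *
+ *   __m128i a = _mm_set1_epi16(1);
+ *   __m128i c = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0);
+ *   __m128i r = _mm_sllv_epi16(a, c); // lanes 0..7 = 1, 2, 4, ..., 128
+ */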
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sllv_epi16 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
+              (__v16hi) __B,
+              (__v16hi)
+              _mm256_setzero_si256 (),
+              (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sllv_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
+              (__v16hi) __B,
+              (__v16hi) __W,
+              (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sllv_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
+              (__v16hi) __B,
+              (__v16hi)
+              _mm256_setzero_si256 (),
+              (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sllv_epi16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi)
+             _mm_setzero_hi (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sllv_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+         __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sllv_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sll_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllw128_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllw128_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sll_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+           __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psllw256_mask ((__v16hi) __A,
+             (__v8hi) __B,
+             (__v16hi) __W,
+             (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sll_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psllw256_mask ((__v16hi) __A,
+             (__v8hi) __B,
+             (__v16hi)
+             _mm256_setzero_si256 (),
+             (__mmask16) __U);
+}
+
+#define _mm_mask_slli_epi16(W, U, A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_psllwi128_mask((__v8hi)(__m128i)(A), (int)(B), \
+                                         (__v8hi)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_slli_epi16(U, A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_psllwi128_mask((__v8hi)(__m128i)(A), (int)(B), \
+                                         (__v8hi)_mm_setzero_si128(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_mask_slli_epi16(W, U, A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_psllwi256_mask((__v16hi)(__m256i)(A), (int)(B), \
+                                         (__v16hi)(__m256i)(W), \
+                                         (__mmask16)(U)); })
+
+#define _mm256_maskz_slli_epi16(U, A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_psllwi256_mask((__v16hi)(__m256i)(A), (int)(B), \
+                                         (__v16hi)_mm256_setzero_si256(), \
+                                         (__mmask16)(U)); })
+
+
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srlv_epi16 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
+              (__v16hi) __B,
+              (__v16hi)
+              _mm256_setzero_si256 (),
+              (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srlv_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
+              (__v16hi) __B,
+              (__v16hi) __W,
+              (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srlv_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
+              (__v16hi) __B,
+              (__v16hi)
+              _mm256_setzero_si256 (),
+              (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srlv_epi16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi)
+             _mm_setzero_hi (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srlv_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+         __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srlv_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srav_epi16 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
+              (__v16hi) __B,
+              (__v16hi)
+              _mm256_setzero_si256 (),
+              (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srav_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
+              (__v16hi) __B,
+              (__v16hi) __W,
+              (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srav_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
+              (__v16hi) __B,
+              (__v16hi)
+              _mm256_setzero_si256 (),
+              (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srav_epi16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi)
+             _mm_setzero_hi (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srav_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+         __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srav_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sra_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psraw128_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sra_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psraw128_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sra_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+           __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psraw256_mask ((__v16hi) __A,
+             (__v8hi) __B,
+             (__v16hi) __W,
+             (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sra_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psraw256_mask ((__v16hi) __A,
+             (__v8hi) __B,
+             (__v16hi)
+             _mm256_setzero_si256 (),
+             (__mmask16) __U);
+}
+
+#define _mm_mask_srai_epi16(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psrawi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
+                                         (__v8hi)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_srai_epi16(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psrawi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
+                                         (__v8hi)_mm_setzero_si128(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_mask_srai_epi16(W, U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psrawi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
+                                         (__v16hi)(__m256i)(W), \
+                                         (__mmask16)(U)); })
+
+#define _mm256_maskz_srai_epi16(U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psrawi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
+                                         (__v16hi)_mm256_setzero_si256(), \
+                                         (__mmask16)(U)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srl_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlw128_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlw128_mask ((__v8hi) __A,
+             (__v8hi) __B,
+             (__v8hi)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srl_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+           __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrlw256_mask ((__v16hi) __A,
+             (__v8hi) __B,
+             (__v16hi) __W,
+             (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srl_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrlw256_mask ((__v16hi) __A,
+             (__v8hi) __B,
+             (__v16hi)
+             _mm256_setzero_si256 (),
+             (__mmask16) __U);
+}
+
+#define _mm_mask_srli_epi16(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psrlwi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
+                                         (__v8hi)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_srli_epi16(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psrlwi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
+                                         (__v8hi)_mm_setzero_si128(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_mask_srli_epi16(W, U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psrlwi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
+                                         (__v16hi)(__m256i)(W), \
+                                         (__mmask16)(U)); })
+
+#define _mm256_maskz_srli_epi16(U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psrlwi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
+                                         (__v16hi)_mm256_setzero_si256(), \
+                                         (__mmask16)(U)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
+                (__v8hi) __A,
+                (__v8hi) __W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
+                (__v8hi) __A,
+                (__v8hi) _mm_setzero_hi ());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
+                (__v16hi) __A,
+                (__v16hi) __W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
+                (__v16hi) __A,
+                (__v16hi) _mm256_setzero_si256 ());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
+                (__v16qi) __A,
+                (__v16qi) __W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
+                (__v16qi) __A,
+                (__v16qi) _mm_setzero_hi ());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
+                (__v32qi) __A,
+                (__v32qi) __W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
+                (__v32qi) __A,
+                (__v32qi) _mm256_setzero_si256 ());
+}
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastb128_gpr_mask (__A,
+                 (__v16qi) __O,
+                 __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_set1_epi8 (__mmask16 __M, char __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastb128_gpr_mask (__A,
+                 (__v16qi)
+                 _mm_setzero_si128 (),
+                 __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastb256_gpr_mask (__A,
+                 (__v32qi) __O,
+                 __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastb256_gpr_mask (__A,
+                 (__v32qi)
+                 _mm256_setzero_si256 (),
+                 __M);
+}
+
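+/* Masked unaligned loads: lanes selected by __U are read from __P, the rest
+ * come from __W (_mask forms) or are zeroed (_maskz forms).  A common use is
+ * reading a short tail of n < 8 16-bit elements (hypothetical buffer `buf`):
+ *
+ *   __mmask8 k = (__mmask8)((1u << n) - 1);
+ *   __m128i tail = _mm_maskz_loadu_epi16(k, buf);
+ */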
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P,
+                 (__v8hi) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P,
+                 (__v8hi)
+                 _mm_setzero_hi (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P,
+                 (__v16hi) __W,
+                 (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P,
+                 (__v16hi)
+                 _mm256_setzero_si256 (),
+                 (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P,
+                 (__v16qi) __W,
+                 (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P,
+                 (__v16qi)
+                 _mm_setzero_si128 (),
+                 (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P,
+                 (__v32qi) __W,
+                 (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P,
+                 (__v32qi)
+                 _mm256_setzero_si256 (),
+                 (__mmask32) __U);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_storedquhi128_mask ((__v8hi *) __P,
+             (__v8hi) __A,
+             (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
+{
+  __builtin_ia32_storedquhi256_mask ((__v16hi *) __P,
+             (__v16hi) __A,
+             (__mmask16) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
+{
+  __builtin_ia32_storedquqi128_mask ((__v16qi *) __P,
+             (__v16qi) __A,
+             (__mmask16) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
+{
+  __builtin_ia32_storedquqi256_mask ((__v32qi *) __P,
+             (__v32qi) __A,
+             (__mmask32) __U);
+}
+
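+/* ptestm/ptestnm: bit i of the result mask is set when (__A & __B) in
+ * element i is nonzero (test) or zero (testn).  For example, with some
+ * vector v, a nonzero-byte mask can be built as:
+ *
+ *   __mmask16 nz = _mm_test_epi8_mask(v, v);
+ */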
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_test_epi8_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestmb128 ((__v16qi) __A,
+            (__v16qi) __B,
+            (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestmb128 ((__v16qi) __A,
+            (__v16qi) __B, __U);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_test_epi8_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestmb256 ((__v32qi) __A,
+            (__v32qi) __B,
+            (__mmask32) -1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_test_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestmb256 ((__v32qi) __A,
+            (__v32qi) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_test_epi16_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmw128 ((__v8hi) __A,
+                 (__v8hi) __B,
+                 (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_test_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmw128 ((__v8hi) __A,
+                 (__v8hi) __B, __U);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_test_epi16_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestmw256 ((__v16hi) __A,
+            (__v16hi) __B,
+            (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestmw256 ((__v16hi) __A,
+            (__v16hi) __B, __U);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_testn_epi8_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestnmb128 ((__v16qi) __A,
+             (__v16qi) __B,
+             (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_testn_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestnmb128 ((__v16qi) __A,
+             (__v16qi) __B, __U);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_testn_epi8_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestnmb256 ((__v32qi) __A,
+             (__v32qi) __B,
+             (__mmask32) -1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_testn_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestnmb256 ((__v32qi) __A,
+             (__v32qi) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_testn_epi16_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmw128 ((__v8hi) __A,
+            (__v8hi) __B,
+            (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_testn_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmw128 ((__v8hi) __A,
+            (__v8hi) __B, __U);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_testn_epi16_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestnmw256 ((__v16hi) __A,
+             (__v16hi) __B,
+             (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_testn_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestnmw256 ((__v16hi) __A,
+             (__v16hi) __B, __U);
+}
+
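+/* The movepi*_mask intrinsics pack the sign bit of each element into a mask;
+ * the movm_epi* intrinsics are the inverse, expanding each mask bit into an
+ * all-ones or all-zero element.  Sketch, for some vector v:
+ *
+ *   __mmask16 neg = _mm_movepi8_mask(v);  // bit i set iff byte i is negative
+ *   __m128i   sel = _mm_movm_epi8(neg);   // 0xFF where negative, 0 elsewhere
+ */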
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_movepi8_mask (__m128i __A)
+{
+  return (__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_movepi8_mask (__m256i __A)
+{
+  return (__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_movepi16_mask (__m128i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_movepi16_mask (__m256i __A)
+{
+  return (__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movm_epi8 (__mmask16 __A)
+{
+  return (__m128i) __builtin_ia32_cvtmask2b128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_movm_epi8 (__mmask32 __A)
+{
+  return (__m256i) __builtin_ia32_cvtmask2b256 (__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movm_epi16 (__mmask8 __A)
+{
+  return (__m128i) __builtin_ia32_cvtmask2w128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_movm_epi16 (__mmask16 __A)
+{
+  return (__m256i) __builtin_ia32_cvtmask2w256 (__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectb_128(__M,
+                                             (__v16qi) _mm_broadcastb_epi8(__A),
+                                             (__v16qi) __O);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectb_128(__M,
+                                             (__v16qi) _mm_broadcastb_epi8(__A),
+                                             (__v16qi) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A)
+{
+  return (__m256i)__builtin_ia32_selectb_256(__M,
+                                             (__v32qi) _mm256_broadcastb_epi8(__A),
+                                             (__v32qi) __O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A)
+{
+  return (__m256i)__builtin_ia32_selectb_256(__M,
+                                             (__v32qi) _mm256_broadcastb_epi8(__A),
+                                             (__v32qi) _mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectw_128(__M,
+                                             (__v8hi) _mm_broadcastw_epi16(__A),
+                                             (__v8hi) __O);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectw_128(__M,
+                                             (__v8hi) _mm_broadcastw_epi16(__A),
+                                             (__v8hi) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A)
+{
+  return (__m256i)__builtin_ia32_selectw_256(__M,
+                                             (__v16hi) _mm256_broadcastw_epi16(__A),
+                                             (__v16hi) __O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A)
+{
+  return (__m256i)__builtin_ia32_selectw_256(__M,
+                                             (__v16hi) _mm256_broadcastw_epi16(__A),
+                                             (__v16hi) _mm256_setzero_si256());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastw256_gpr_mask (__A,
+                 (__v16hi) __O,
+                 __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_set1_epi16 (__mmask16 __M, short __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastw256_gpr_mask (__A,
+                 (__v16hi) _mm256_setzero_si256 (),
+                 __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastw128_gpr_mask (__A,
+                 (__v8hi) __O,
+                 __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_set1_epi16 (__mmask8 __M, short __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastw128_gpr_mask (__A,
+                 (__v8hi) _mm_setzero_si128 (),
+                 __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutexvar_epi16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
+                 (__v8hi) __A,
+                 (__v8hi) _mm_undefined_si128 (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutexvar_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
+                 (__v8hi) __A,
+                 (__v8hi) _mm_setzero_si128 (),
+                 (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutexvar_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+          __m128i __B)
+{
+  return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
+                 (__v8hi) __A,
+                 (__v8hi) __W,
+                 (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutexvar_epi16 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
+                 (__v16hi) __A,
+                 (__v16hi) _mm256_undefined_si256 (),
+                 (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_epi16 (__mmask16 __M, __m256i __A,
+        __m256i __B)
+{
+  return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
+                 (__v16hi) __A,
+                 (__v16hi) _mm256_setzero_si256 (),
+                 (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+             __m256i __B)
+{
+  return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
+                 (__v16hi) __A,
+                 (__v16hi) __W,
+                 (__mmask16) __M);
+}
+
+#define _mm_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \
+  (__m128i)__builtin_ia32_palignr128_mask((__v16qi)(__m128i)(A), \
+                                          (__v16qi)(__m128i)(B), (int)(N), \
+                                          (__v16qi)(__m128i)(W), \
+                                          (__mmask16)(U)); })
+
+#define _mm_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \
+  (__m128i)__builtin_ia32_palignr128_mask((__v16qi)(__m128i)(A), \
+                                          (__v16qi)(__m128i)(B), (int)(N), \
+                                          (__v16qi)_mm_setzero_si128(), \
+                                          (__mmask16)(U)); })
+
+#define _mm256_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \
+  (__m256i)__builtin_ia32_palignr256_mask((__v32qi)(__m256i)(A), \
+                                          (__v32qi)(__m256i)(B), (int)(N), \
+                                          (__v32qi)(__m256i)(W), \
+                                          (__mmask32)(U)); })
+
+#define _mm256_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \
+  (__m256i)__builtin_ia32_palignr256_mask((__v32qi)(__m256i)(A), \
+                                          (__v32qi)(__m256i)(B), (int)(N), \
+                                          (__v32qi)_mm256_setzero_si256(), \
+                                          (__mmask32)(U)); })
+
+#define _mm_dbsad_epu8(A, B, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
+                                           (__v16qi)(__m128i)(B), (int)(imm), \
+                                           (__v8hi)_mm_setzero_hi(), \
+                                           (__mmask8)-1); })
+
+#define _mm_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
+                                           (__v16qi)(__m128i)(B), (int)(imm), \
+                                           (__v8hi)(__m128i)(W), \
+                                           (__mmask8)(U)); })
+
+#define _mm_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
+                                           (__v16qi)(__m128i)(B), (int)(imm), \
+                                           (__v8hi)_mm_setzero_si128(), \
+                                           (__mmask8)(U)); })
+
+#define _mm256_dbsad_epu8(A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
+                                           (__v32qi)(__m256i)(B), (int)(imm), \
+                                           (__v16hi)_mm256_setzero_si256(), \
+                                           (__mmask16)-1); })
+
+#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
+                                           (__v32qi)(__m256i)(B), (int)(imm), \
+                                           (__v16hi)(__m256i)(W), \
+                                           (__mmask16)(U)); })
+
+#define _mm256_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
+                                           (__v32qi)(__m256i)(B), (int)(imm), \
+                                           (__v16hi)_mm256_setzero_si256(), \
+                                           (__mmask16)(U)); })
 
 #undef __DEFAULT_FN_ATTRS
 
diff --git a/renderscript/clang-include/avx512vlcdintrin.h b/renderscript/clang-include/avx512vlcdintrin.h
new file mode 100644
index 0000000..7b02e2e
--- /dev/null
+++ b/renderscript/clang-include/avx512vlcdintrin.h
@@ -0,0 +1,263 @@
+/*===---- avx512vlcdintrin.h - AVX512VL and AVX512CD intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlcdintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLCDINTRIN_H
+#define __AVX512VLCDINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512cd")))
+
+
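+/* The broadcastm intrinsics copy a mask register, zero-extended to the
+ * element width, into every 64-bit (broadcastmb) or 32-bit (broadcastmw)
+ * element of the destination. */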
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastmb_epi64 (__mmask8 __A)
+{
+  return (__m128i) __builtin_ia32_broadcastmb128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastmb_epi64 (__mmask8 __A)
+{
+  return (__m256i) __builtin_ia32_broadcastmb256 (__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastmw_epi32 (__mmask16 __A)
+{
+  return (__m128i) __builtin_ia32_broadcastmw128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastmw_epi32 (__mmask16 __A)
+{
+  return (__m256i) __builtin_ia32_broadcastmw256 (__A);
+}
+
+
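+/* VPCONFLICT: element i of the result is a bitmap of the lower-indexed
+ * elements of __A that compare equal to element i (bit j set when
+ * __A[j] == __A[i] for j < i).  This is the usual building block for
+ * vectorizing loops with possibly-colliding indirect updates, such as
+ * histograms or scatters with duplicate indices. */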
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_conflict_epi64 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
+               (__v2di) _mm_undefined_si128 (),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
+               (__v2di) __W,
+               (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_conflict_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
+               (__v2di)
+               _mm_setzero_di (),
+               (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_conflict_epi64 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
+               (__v4di)  _mm256_undefined_si256 (),
+               (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
+               (__v4di) __W,
+               (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
+               (__v4di) _mm256_setzero_si256 (),
+               (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_conflict_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
+               (__v4si) _mm_undefined_si128 (),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
+               (__v4si) __W,
+               (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
+               (__v4si) _mm_setzero_si128 (),
+               (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_conflict_epi32 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
+               (__v8si) _mm256_undefined_si256 (),
+               (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
+               (__v8si) __W,
+               (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
+               (__v8si)
+               _mm256_setzero_si256 (),
+               (__mmask8) __U);
+}
+
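+/* VPLZCNT: per-element count of leading zero bits; a zero element yields the
+ * full element width.  Sketch (elements listed high to low):
+ *
+ *   __m128i x = _mm_set_epi32(0, 1, (int)0x80000000, 0xFFFF);
+ *   __m128i n = _mm_lzcnt_epi32(x); // {32, 31, 0, 16}
+ */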
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_lzcnt_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
+                 (__v4si)
+                 _mm_setzero_si128 (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_lzcnt_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
+                 (__v4si) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_lzcnt_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
+                 (__v4si)
+                 _mm_setzero_si128 (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_lzcnt_epi32 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
+                 (__v8si)
+                 _mm256_setzero_si256 (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_lzcnt_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
+                 (__v8si) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_lzcnt_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
+                 (__v8si)
+                 _mm256_setzero_si256 (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_lzcnt_epi64 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
+                 (__v2di)
+                 _mm_setzero_di (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_lzcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
+                 (__v2di) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_lzcnt_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
+                 (__v2di)
+                 _mm_setzero_di (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_lzcnt_epi64 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
+                 (__v4di)
+                 _mm256_setzero_si256 (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_lzcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
+                 (__v4di) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
+                 (__v4di)
+                 _mm256_setzero_si256 (),
+                 (__mmask8) __U);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVX512VLCDINTRIN_H */
diff --git a/renderscript/clang-include/avx512vldqintrin.h b/renderscript/clang-include/avx512vldqintrin.h
index dfd858e..8187bcd 100644
--- a/renderscript/clang-include/avx512vldqintrin.h
+++ b/renderscript/clang-include/avx512vldqintrin.h
@@ -33,7 +33,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_mullo_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i) ((__v4di) __A * (__v4di) __B);
+  return (__m256i) ((__v4du) __A * (__v4du) __B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -55,7 +55,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_mullo_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i) ((__v2di) __A * (__v2di) __B);
+  return (__m128i) ((__v2du) __A * (__v2du) __B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -852,101 +852,413 @@
                 (__mmask8) __U);
 }
 
-#define _mm_range_pd(__A, __B, __C) __extension__ ({                         \
-  (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) -1); })
+#define _mm_range_pd(A, B, C) __extension__ ({                         \
+  (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), (int)(C), \
+                                          (__v2df)_mm_setzero_pd(), \
+                                          (__mmask8)-1); })
 
-#define _mm_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({          \
-  (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
-                (__v2df) __W, (__mmask8) __U); })
+#define _mm_mask_range_pd(W, U, A, B, C) __extension__ ({          \
+  (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), (int)(C), \
+                                          (__v2df)(__m128d)(W), \
+                                          (__mmask8)(U)); })
 
-#define _mm_maskz_range_pd(__U, __A, __B, __C) __extension__ ({              \
-  (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) __U); })
+#define _mm_maskz_range_pd(U, A, B, C) __extension__ ({              \
+  (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), (int)(C), \
+                                          (__v2df)_mm_setzero_pd(), \
+                                          (__mmask8)(U)); })
 
-#define _mm256_range_pd(__A, __B, __C) __extension__ ({                      \
-  (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
-                (__v4df) _mm256_setzero_pd(), (__mmask8) -1); })
+#define _mm256_range_pd(A, B, C) __extension__ ({                      \
+  (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+                                          (__v4df)(__m256d)(B), (int)(C), \
+                                          (__v4df)_mm256_setzero_pd(), \
+                                          (__mmask8)-1); })
 
-#define _mm256_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({       \
-  (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
-                (__v4df) __W, (__mmask8) __U); })
+#define _mm256_mask_range_pd(W, U, A, B, C) __extension__ ({       \
+  (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+                                          (__v4df)(__m256d)(B), (int)(C), \
+                                          (__v4df)(__m256d)(W), \
+                                          (__mmask8)(U)); })
 
-#define _mm256_maskz_range_pd(__U, __A, __B, __C) __extension__ ({           \
-  (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
-                (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+#define _mm256_maskz_range_pd(U, A, B, C) __extension__ ({           \
+  (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+                                          (__v4df)(__m256d)(B), (int)(C), \
+                                          (__v4df)_mm256_setzero_pd(), \
+                                          (__mmask8)(U)); })
 
-#define _mm_range_ps(__A, __B, __C) __extension__ ({                         \
-  (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C,  \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+#define _mm_range_ps(A, B, C) __extension__ ({                         \
+  (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), (int)(C), \
+                                         (__v4sf)_mm_setzero_ps(), \
+                                         (__mmask8)-1); })
 
-#define _mm_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({          \
-  (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C,  \
-                (__v4sf) __W, (__mmask8) __U); })
+#define _mm_mask_range_ps(W, U, A, B, C) __extension__ ({          \
+  (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), (int)(C), \
+                                         (__v4sf)(__m128)(W), (__mmask8)(U)); })
 
-#define _mm_maskz_range_ps(__U, __A, __B, __C) __extension__ ({              \
-  (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C,  \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+#define _mm_maskz_range_ps(U, A, B, C) __extension__ ({              \
+  (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), (int)(C), \
+                                         (__v4sf)_mm_setzero_ps(), \
+                                         (__mmask8)(U)); })
 
-#define _mm256_range_ps(__A, __B, __C) __extension__ ({                      \
-  (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C,  \
-                (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+#define _mm256_range_ps(A, B, C) __extension__ ({                      \
+  (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+                                         (__v8sf)(__m256)(B), (int)(C), \
+                                         (__v8sf)_mm256_setzero_ps(), \
+                                         (__mmask8)-1); })
 
-#define _mm256_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({       \
-  (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C,  \
-                (__v8sf) __W, (__mmask8) __U); })
+#define _mm256_mask_range_ps(W, U, A, B, C) __extension__ ({       \
+  (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+                                         (__v8sf)(__m256)(B), (int)(C), \
+                                         (__v8sf)(__m256)(W), (__mmask8)(U)); })
 
-#define _mm256_maskz_range_ps(__U, __A, __B, __C) __extension__ ({           \
-  (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C,  \
-                (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+#define _mm256_maskz_range_ps(U, A, B, C) __extension__ ({           \
+  (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+                                         (__v8sf)(__m256)(B), (int)(C), \
+                                         (__v8sf)_mm256_setzero_ps(), \
+                                         (__mmask8)(U)); })
 
-#define _mm_reduce_pd(__A, __B) __extension__ ({                \
-  (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) -1); })
+#define _mm_reduce_pd(A, B) __extension__ ({                \
+  (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)-1); })
 
-#define _mm_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
-  (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
-                (__v2df) __W, (__mmask8) __U); })
+#define _mm_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+  (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+                                           (__v2df)(__m128d)(W), \
+                                           (__mmask8)(U)); })
 
-#define _mm_maskz_reduce_pd(__U, __A, __B) __extension__ ({     \
-  (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) __U); })
+#define _mm_maskz_reduce_pd(U, A, B) __extension__ ({     \
+  (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)(U)); })
 
-#define _mm256_reduce_pd(__A, __B) __extension__ ({                \
-  (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B,    \
-                (__v4df) _mm256_setzero_pd(), (__mmask8) -1); })
+#define _mm256_reduce_pd(A, B) __extension__ ({                \
+  (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+                                           (__v4df)_mm256_setzero_pd(), \
+                                           (__mmask8)-1); })
 
-#define _mm256_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
-  (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B,    \
-                (__v4df) __W, (__mmask8) __U); })
+#define _mm256_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+  (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+                                           (__v4df)(__m256d)(W), \
+                                           (__mmask8)(U)); })
 
-#define _mm256_maskz_reduce_pd(__U, __A, __B) __extension__ ({     \
-  (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B,    \
-                (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+#define _mm256_maskz_reduce_pd(U, A, B) __extension__ ({     \
+  (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+                                           (__v4df)_mm256_setzero_pd(), \
+                                           (__mmask8)(U)); })
 
-#define _mm_reduce_ps(__A, __B) __extension__ ({                   \
-  (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B,     \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+#define _mm_reduce_ps(A, B) __extension__ ({                   \
+  (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)-1); })
 
-#define _mm_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({    \
-  (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B,     \
-                (__v4sf) __W, (__mmask8) __U); })
+#define _mm_mask_reduce_ps(W, U, A, B) __extension__ ({    \
+  (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+                                          (__v4sf)(__m128)(W), \
+                                          (__mmask8)(U)); })
 
-#define _mm_maskz_reduce_ps(__U, __A, __B) __extension__ ({        \
-  (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B,     \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+#define _mm_maskz_reduce_ps(U, A, B) __extension__ ({        \
+  (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)(U)); })
 
-#define _mm256_reduce_ps(__A, __B) __extension__ ({                \
-  (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B,     \
-                (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+#define _mm256_reduce_ps(A, B) __extension__ ({                \
+  (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+                                          (__v8sf)_mm256_setzero_ps(), \
+                                          (__mmask8)-1); })
 
-#define _mm256_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
-  (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B,     \
-                (__v8sf) __W, (__mmask8) __U); })
+#define _mm256_mask_reduce_ps(W, U, A, B) __extension__ ({ \
+  (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+                                          (__v8sf)(__m256)(W), \
+                                          (__mmask8)(U)); })
 
-#define _mm256_maskz_reduce_ps(__U, __A, __B) __extension__ ({     \
-  (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B,     \
-                (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+#define _mm256_maskz_reduce_ps(U, A, B) __extension__ ({     \
+  (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+                                          (__v8sf)_mm256_setzero_ps(), \
+                                          (__mmask8)(U)); })
+
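+/* movepi{32,64}_mask collect the sign bit of every element into a __mmask8;
+   movm_epi{32,64} expand each mask bit back into an all-ones or all-zeros
+   element. */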
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_movepi32_mask (__m128i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtd2mask128 ((__v4si) __A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_movepi32_mask (__m256i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtd2mask256 ((__v8si) __A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movm_epi32 (__mmask8 __A)
+{
+  return (__m128i) __builtin_ia32_cvtmask2d128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_movm_epi32 (__mmask8 __A)
+{
+  return (__m256i) __builtin_ia32_cvtmask2d256 (__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movm_epi64 (__mmask8 __A)
+{
+  return (__m128i) __builtin_ia32_cvtmask2q128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_movm_epi64 (__mmask8 __A)
+{
+  return (__m256i) __builtin_ia32_cvtmask2q256 (__A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_movepi64_mask (__m128i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtq2mask128 ((__v2di) __A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_movepi64_mask (__m256i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtq2mask256 ((__v4di) __A);
+}
+
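+/* broadcast_{f,i}32x2 and broadcast_{f,i}64x2 replicate the low 64 or 128
+   bits of the source across the whole destination, with merge (__O) and
+   zeroing variants. */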
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcast_f32x2 (__m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A,
+                (__v8sf)_mm256_undefined_ps(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_f32x2 (__m256 __O, __mmask8 __M, __m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A,
+                (__v8sf) __O,
+                __M);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A,
+                (__v8sf) _mm256_setzero_ps (),
+                __M);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcast_f64x2 (__m128d __A)
+{
+  return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A,
+                 (__v4df)_mm256_undefined_pd(),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_f64x2 (__m256d __O, __mmask8 __M, __m128d __A)
+{
+  return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A,
+                 (__v4df) __O,
+                 __M);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
+{
+  return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A,
+                 (__v4df) _mm256_setzero_pd (),
+                 __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcast_i32x2 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A,
+                 (__v4si)_mm_undefined_si128(),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_broadcast_i32x2 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A,
+                 (__v4si) __O,
+                 __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A,
+                 (__v4si) _mm_setzero_si128 (),
+                 __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcast_i32x2 (__m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A,
+                 (__v8si)_mm256_undefined_si256(),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_i32x2 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A,
+                 (__v8si) __O,
+                 __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A,
+                 (__v8si) _mm256_setzero_si256 (),
+                 __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcast_i64x2 (__m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A,
+                 (__v4di)_mm256_undefined_si256(),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_i64x2 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A,
+                 (__v4di) __O,
+                 __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A,
+                 (__v4di) _mm256_setzero_si256 (),
+                 __M);
+}
+
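+/* extract{f,i}64x2 return the 128-bit lane of A selected by imm;
+   insert{f,i}64x2 write B into that lane of A.  Both come in merge- and
+   zero-masked forms. */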
+#define _mm256_extractf64x2_pd(A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+                                                (int)(imm), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)-1); })
+
+#define _mm256_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+                                                (int)(imm), \
+                                                (__v2df)(__m128d)(W), \
+                                                (__mmask8)(U)); })
+
+#define _mm256_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+                                                (int)(imm), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)(U)); })
+
+#define _mm256_extracti64x2_epi64(A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+                                                (int)(imm), \
+                                                (__v2di)_mm_setzero_di(), \
+                                                (__mmask8)-1); })
+
+#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+                                                (int)(imm), \
+                                                (__v2di)(__m128i)(W), \
+                                                (__mmask8)(U)); })
+
+#define _mm256_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+                                                (int)(imm), \
+                                                (__v2di)_mm_setzero_di(), \
+                                                (__mmask8)(U)); })
+
+#define _mm256_insertf64x2(A, B, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(imm), \
+                                               (__v4df)_mm256_setzero_pd(), \
+                                               (__mmask8)-1); })
+
+#define _mm256_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(imm), \
+                                               (__v4df)(__m256d)(W), \
+                                               (__mmask8)(U)); })
+
+#define _mm256_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(imm), \
+                                               (__v4df)_mm256_setzero_pd(), \
+                                               (__mmask8)(U)); })
+
+#define _mm256_inserti64x2(A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
+                                               (__v2di)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v4di)_mm256_setzero_si256(), \
+                                               (__mmask8)-1); })
+
+#define _mm256_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
+                                               (__v2di)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v4di)(__m256i)(W), \
+                                               (__mmask8)(U)); })
+
+#define _mm256_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
+                                               (__v2di)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v4di)_mm256_setzero_si256(), \
+                                               (__mmask8)(U)); })
+
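+/* fpclass sets bit i of the result mask when element i falls into any of the
+   floating-point classes selected by imm (quiet/signaling NaN, +/-0, +/-Inf,
+   denormal, negative). */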
+#define _mm_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm_fpclass_pd_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
+                                             (__mmask8)-1); })
+
+#define _mm256_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm256_fpclass_pd_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
+                                             (__mmask8)-1); })
+
+#define _mm_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm_fpclass_ps_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                             (__mmask8)-1); })
+
+#define _mm256_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm256_fpclass_ps_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                             (__mmask8)-1); })
 
 #undef __DEFAULT_FN_ATTRS
 
diff --git a/renderscript/clang-include/avx512vlintrin.h b/renderscript/clang-include/avx512vlintrin.h
index 8f13536..295ce29 100644
--- a/renderscript/clang-include/avx512vlintrin.h
+++ b/renderscript/clang-include/avx512vlintrin.h
@@ -29,17 +29,22 @@
 #define __AVX512VLINTRIN_H
 
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl")))
-#define __DEFAULT_FN_ATTRS_BOTH __attribute__((__always_inline__, __nodebug__, __target__("avx512vl, avx512bw")))
+
+/* Doesn't require avx512vl; also used by avx512dqintrin.h, avx512vldqintrin.h
+   and avx512vlcdintrin.h. */
+static __inline __m128i __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
+_mm_setzero_di(void) {
+  return (__m128i)(__v2di){ 0LL, 0LL };
+}
 
 /* Integer compare */
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm_cmpeq_epi32_mask(__m128i __a, __m128i __b) {
   return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
                                                   (__mmask8)-1);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm_mask_cmpeq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
   return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
                                                   __u);
@@ -57,13 +62,13 @@
                                                 __u);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm256_cmpeq_epi32_mask(__m256i __a, __m256i __b) {
   return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
                                                   (__mmask8)-1);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm256_mask_cmpeq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
   return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
                                                   __u);
@@ -81,13 +86,13 @@
                                                 __u);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm_cmpeq_epi64_mask(__m128i __a, __m128i __b) {
   return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
                                                   (__mmask8)-1);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm_mask_cmpeq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
   return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
                                                   __u);
@@ -105,13 +110,13 @@
                                                 __u);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm256_cmpeq_epi64_mask(__m256i __a, __m256i __b) {
   return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
                                                   (__mmask8)-1);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm256_mask_cmpeq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
   return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
                                                   __u);
@@ -226,13 +231,13 @@
                                                 __u);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm_cmpgt_epi32_mask(__m128i __a, __m128i __b) {
   return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b,
                                                   (__mmask8)-1);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm_mask_cmpgt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
   return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b,
                                                   __u);
@@ -250,13 +255,13 @@
                                                 __u);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm256_cmpgt_epi32_mask(__m256i __a, __m256i __b) {
   return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b,
                                                   (__mmask8)-1);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm256_mask_cmpgt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
   return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b,
                                                   __u);
@@ -274,13 +279,13 @@
                                                 __u);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm_cmpgt_epi64_mask(__m128i __a, __m128i __b) {
   return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b,
                                                   (__mmask8)-1);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm_mask_cmpgt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
   return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b,
                                                   __u);
@@ -298,13 +303,13 @@
                                                 __u);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm256_cmpgt_epi64_mask(__m256i __a, __m256i __b) {
   return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b,
                                                   (__mmask8)-1);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm256_mask_cmpgt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
   return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b,
                                                   __u);
@@ -885,437 +890,352 @@
 }
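+/* The masked logic ops below pair the plain AVX2 operation with a
+   per-element select: __builtin_ia32_select*(__U, __X, __W) keeps __X[i]
+   where bit i of __U is set and __W[i] (or zero for the maskz_ forms)
+   otherwise. */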
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_and_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
-           __m256i __B)
+_mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
 {
-  return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A,
-             (__v8si) __B,
-             (__v8si) __W,
-             (__mmask8) __U);
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+                                             (__v8si)_mm256_and_si256(__A, __B),
+                                             (__v8si)__W);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_and_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+_mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
 {
-  return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A,
-             (__v8si) __B,
-             (__v8si)
-             _mm256_setzero_si256 (),
-             (__mmask8) __U);
+  return (__m256i)_mm256_mask_and_epi32(_mm256_setzero_si256(), __U, __A, __B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_and_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
 {
-  return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A,
-             (__v4si) __B,
-             (__v4si) __W,
-             (__mmask8) __U);
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+                                             (__v4si)_mm_and_si128(__A, __B),
+                                             (__v4si)__W);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_and_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
 {
-  return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A,
-             (__v4si) __B,
-             (__v4si)
-             _mm_setzero_si128 (),
-             (__mmask8) __U);
+  return (__m128i)_mm_mask_and_epi32(_mm_setzero_si128(), __U, __A, __B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
-        __m256i __B)
+_mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
 {
-  return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A,
-              (__v8si) __B,
-              (__v8si) __W,
-              (__mmask8) __U);
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+                                          (__v8si)_mm256_andnot_si256(__A, __B),
+                                          (__v8si)__W);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+_mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
 {
-  return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A,
-              (__v8si) __B,
-              (__v8si)
-              _mm256_setzero_si256 (),
-              (__mmask8) __U);
+  return (__m256i)_mm256_mask_andnot_epi32(_mm256_setzero_si256(),
+                                           __U, __A, __B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
-           __m128i __B)
+_mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
 {
-  return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A,
-              (__v4si) __B,
-              (__v4si) __W,
-              (__mmask8) __U);
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+                                             (__v4si)_mm_andnot_si128(__A, __B),
+                                             (__v4si)__W);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
 {
-  return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A,
-              (__v4si) __B,
-              (__v4si)
-              _mm_setzero_si128 (),
-              (__mmask8) __U);
+  return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
-          __m256i __B)
+_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
 {
-  return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A,
-            (__v8si) __B,
-            (__v8si) __W,
-            (__mmask8) __U);
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+                                             (__v8si)_mm256_or_si256(__A, __B),
+                                             (__v8si)__W);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_or_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+_mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
 {
-  return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A,
-            (__v8si) __B,
-            (__v8si)
-            _mm256_setzero_si256 (),
-            (__mmask8) __U);
+  return (__m256i)_mm256_mask_or_epi32(_mm256_setzero_si256(), __U, __A, __B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_or_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
 {
-  return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A,
-            (__v4si) __B,
-            (__v4si) __W,
-            (__mmask8) __U);
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+                                             (__v4si)_mm_or_si128(__A, __B),
+                                             (__v4si)__W);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_or_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
 {
-  return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A,
-            (__v4si) __B,
-            (__v4si)
-            _mm_setzero_si128 (),
-            (__mmask8) __U);
+  return (__m128i)_mm_mask_or_epi32(_mm_setzero_si128(), __U, __A, __B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_xor_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
-           __m256i __B)
+_mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
 {
-  return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A,
-             (__v8si) __B,
-             (__v8si) __W,
-             (__mmask8) __U);
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+                                             (__v8si)_mm256_xor_si256(__A, __B),
+                                             (__v8si)__W);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_xor_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+_mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
 {
-  return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A,
-             (__v8si) __B,
-             (__v8si)
-             _mm256_setzero_si256 (),
-             (__mmask8) __U);
+  return (__m256i)_mm256_mask_xor_epi32(_mm256_setzero_si256(), __U, __A, __B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_xor_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A,
         __m128i __B)
 {
-  return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A,
-             (__v4si) __B,
-             (__v4si) __W,
-             (__mmask8) __U);
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+                                             (__v4si)_mm_xor_si128(__A, __B),
+                                             (__v4si)__W);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_xor_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
 {
-  return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A,
-             (__v4si) __B,
-             (__v4si)
-             _mm_setzero_si128 (),
-             (__mmask8) __U);
+  return (__m128i)_mm_mask_xor_epi32(_mm_setzero_si128(), __U, __A, __B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_and_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
-           __m256i __B)
+_mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
 {
-  return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A,
-             (__v4di) __B,
-             (__v4di) __W, __U);
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                             (__v4di)_mm256_and_si256(__A, __B),
+                                             (__v4di)__W);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_and_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+_mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
 {
-  return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A,
-             (__v4di) __B,
-             (__v4di)
-             _mm256_setzero_pd (),
-             __U);
+  return (__m256i)_mm256_mask_and_epi64(_mm256_setzero_si256(), __U, __A, __B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_and_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+_mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                             (__v2di)_mm_and_si128(__A, __B),
+                                             (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)_mm_mask_and_epi64(_mm_setzero_si128(), __U, __A, __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                          (__v4di)_mm256_andnot_si256(__A, __B),
+                                          (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)_mm256_mask_andnot_epi64(_mm256_setzero_si256(),
+                                           __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                             (__v2di)_mm_andnot_si128(__A, __B),
+                                             (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)_mm_mask_andnot_epi64(_mm_setzero_si128(), __U, __A, __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                             (__v4di)_mm256_or_si256(__A, __B),
+                                             (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)_mm256_mask_or_epi64(_mm256_setzero_si256(), __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                             (__v2di)_mm_or_si128(__A, __B),
+                                             (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)_mm_mask_or_epi64(_mm_setzero_si128(), __U, __A, __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                             (__v4di)_mm256_xor_si256(__A, __B),
+                                             (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)_mm256_mask_xor_epi64(_mm256_setzero_si256(), __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A,
         __m128i __B)
 {
-  return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A,
-             (__v2di) __B,
-             (__v2di) __W, __U);
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                             (__v2di)_mm_xor_si128(__A, __B),
+                                             (__v2di)__W);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_and_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
 {
-  return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A,
-             (__v2di) __B,
-             (__v2di)
-             _mm_setzero_pd (),
-             __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
-        __m256i __B)
-{
-  return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A,
-              (__v4di) __B,
-              (__v4di) __W, __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A,
-              (__v4di) __B,
-              (__v4di)
-              _mm256_setzero_pd (),
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
-           __m128i __B)
-{
-  return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A,
-              (__v2di) __B,
-              (__v2di) __W, __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A,
-              (__v2di) __B,
-              (__v2di)
-              _mm_setzero_pd (),
-              __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_or_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
-          __m256i __B)
-{
-  return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A,
-            (__v4di) __B,
-            (__v4di) __W,
-            (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_or_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A,
-            (__v4di) __B,
-            (__v4di)
-            _mm256_setzero_si256 (),
-            (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_or_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A,
-            (__v2di) __B,
-            (__v2di) __W,
-            (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_or_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A,
-            (__v2di) __B,
-            (__v2di)
-            _mm_setzero_si128 (),
-            (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_xor_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
-           __m256i __B)
-{
-  return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A,
-             (__v4di) __B,
-             (__v4di) __W,
-             (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_xor_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A,
-             (__v4di) __B,
-             (__v4di)
-             _mm256_setzero_si256 (),
-             (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_xor_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
-        __m128i __B)
-{
-  return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A,
-             (__v2di) __B,
-             (__v2di) __W,
-             (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_xor_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A,
-             (__v2di) __B,
-             (__v2di)
-             _mm_setzero_si128 (),
-             (__mmask8) __U);
+  return (__m128i)_mm_mask_xor_epi64(_mm_setzero_si128(), __U, __A, __B);
 }
 
 #define _mm_cmp_epi32_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
-                                        (__v4si)(__m128i)(b), \
-                                        (p), (__mmask8)-1); })
+                                        (__v4si)(__m128i)(b), (int)(p), \
+                                        (__mmask8)-1); })
 
 #define _mm_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
-                                        (__v4si)(__m128i)(b), \
-                                        (p), (__mmask8)(m)); })
+                                        (__v4si)(__m128i)(b), (int)(p), \
+                                        (__mmask8)(m)); })
 
 #define _mm_cmp_epu32_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
-                                         (__v4si)(__m128i)(b), \
-                                         (p), (__mmask8)-1); })
+                                         (__v4si)(__m128i)(b), (int)(p), \
+                                         (__mmask8)-1); })
 
 #define _mm_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
-                                         (__v4si)(__m128i)(b), \
-                                         (p), (__mmask8)(m)); })
+                                         (__v4si)(__m128i)(b), (int)(p), \
+                                         (__mmask8)(m)); })
 
 #define _mm256_cmp_epi32_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
-                                        (__v8si)(__m256i)(b), \
-                                        (p), (__mmask8)-1); })
+                                        (__v8si)(__m256i)(b), (int)(p), \
+                                        (__mmask8)-1); })
 
 #define _mm256_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
-                                        (__v8si)(__m256i)(b), \
-                                        (p), (__mmask8)(m)); })
+                                        (__v8si)(__m256i)(b), (int)(p), \
+                                        (__mmask8)(m)); })
 
 #define _mm256_cmp_epu32_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
-                                         (__v8si)(__m256i)(b), \
-                                         (p), (__mmask8)-1); })
+                                         (__v8si)(__m256i)(b), (int)(p), \
+                                         (__mmask8)-1); })
 
 #define _mm256_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
-                                         (__v8si)(__m256i)(b), \
-                                         (p), (__mmask8)(m)); })
+                                         (__v8si)(__m256i)(b), (int)(p), \
+                                         (__mmask8)(m)); })
 
 #define _mm_cmp_epi64_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
-                                        (__v2di)(__m128i)(b), \
-                                        (p), (__mmask8)-1); })
+                                        (__v2di)(__m128i)(b), (int)(p), \
+                                        (__mmask8)-1); })
 
 #define _mm_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
-                                        (__v2di)(__m128i)(b), \
-                                        (p), (__mmask8)(m)); })
+                                        (__v2di)(__m128i)(b), (int)(p), \
+                                        (__mmask8)(m)); })
 
 #define _mm_cmp_epu64_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
-                                         (__v2di)(__m128i)(b), \
-                                         (p), (__mmask8)-1); })
+                                         (__v2di)(__m128i)(b), (int)(p), \
+                                         (__mmask8)-1); })
 
 #define _mm_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
-                                         (__v2di)(__m128i)(b), \
-                                         (p), (__mmask8)(m)); })
+                                         (__v2di)(__m128i)(b), (int)(p), \
+                                         (__mmask8)(m)); })
 
 #define _mm256_cmp_epi64_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
-                                        (__v4di)(__m256i)(b), \
-                                        (p), (__mmask8)-1); })
+                                        (__v4di)(__m256i)(b), (int)(p), \
+                                        (__mmask8)-1); })
 
 #define _mm256_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
-                                        (__v4di)(__m256i)(b), \
-                                        (p), (__mmask8)(m)); })
+                                        (__v4di)(__m256i)(b), (int)(p), \
+                                        (__mmask8)(m)); })
 
 #define _mm256_cmp_epu64_mask(a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
-                                         (__v4di)(__m256i)(b), \
-                                         (p), (__mmask8)-1); })
+                                         (__v4di)(__m256i)(b), (int)(p), \
+                                         (__mmask8)-1); })
 
 #define _mm256_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
   (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
-                                         (__v4di)(__m256i)(b), \
-                                         (p), (__mmask8)(m)); })
+                                         (__v4di)(__m256i)(b), (int)(p), \
+                                         (__mmask8)(m)); })
 
 #define _mm256_cmp_ps_mask(a, b, p)  __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
-                                         (__v8sf)(__m256)(b), \
-                                         (p), (__mmask8)-1); })
+                                         (__v8sf)(__m256)(b), (int)(p), \
+                                         (__mmask8)-1); })
 
 #define _mm256_mask_cmp_ps_mask(m, a, b, p)  __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
-                                         (__v8sf)(__m256)(b), \
-                                         (p), (__mmask8)(m)); })
+                                         (__v8sf)(__m256)(b), (int)(p), \
+                                         (__mmask8)(m)); })
 
 #define _mm256_cmp_pd_mask(a, b, p)  __extension__ ({ \
-  (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256)(a), \
-                                         (__v4df)(__m256)(b), \
-                                         (p), (__mmask8)-1); })
+  (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+                                         (__v4df)(__m256d)(b), (int)(p), \
+                                         (__mmask8)-1); })
 
 #define _mm256_mask_cmp_pd_mask(m, a, b, p)  __extension__ ({ \
-  (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256)(a), \
-                                         (__v4df)(__m256)(b), \
-                                         (p), (__mmask8)(m)); })
+  (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+                                         (__v4df)(__m256d)(b), (int)(p), \
+                                         (__mmask8)(m)); })
 
-#define _mm128_cmp_ps_mask(a, b, p)  __extension__ ({ \
+#define _mm_cmp_ps_mask(a, b, p)  __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
-                                         (__v4sf)(__m128)(b), \
-                                         (p), (__mmask8)-1); })
+                                         (__v4sf)(__m128)(b), (int)(p), \
+                                         (__mmask8)-1); })
 
-#define _mm128_mask_cmp_ps_mask(m, a, b, p)  __extension__ ({ \
+#define _mm_mask_cmp_ps_mask(m, a, b, p)  __extension__ ({ \
   (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
-                                         (__v4sf)(__m128)(b), \
-                                         (p), (__mmask8)(m)); })
+                                         (__v4sf)(__m128)(b), (int)(p), \
+                                         (__mmask8)(m)); })
 
-#define _mm128_cmp_pd_mask(a, b, p)  __extension__ ({ \
-  (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128)(a), \
-                                         (__v2df)(__m128)(b), \
-                                         (p), (__mmask8)-1); })
+#define _mm_cmp_pd_mask(a, b, p)  __extension__ ({ \
+  (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+                                         (__v2df)(__m128d)(b), (int)(p), \
+                                         (__mmask8)-1); })
 
-#define _mm128_mask_cmp_pd_mask(m, a, b, p)  __extension__ ({ \
-  (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128)(a), \
-                                         (__v2df)(__m128)(b), \
-                                         (p), (__mmask8)(m)); })
+#define _mm_mask_cmp_pd_mask(m, a, b, p)  __extension__ ({ \
+  (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+                                         (__v2df)(__m128d)(b), (int)(p), \
+                                         (__mmask8)(m)); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
@@ -2044,58 +1964,58 @@
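+/* mask_blend returns __W[i] where bit i of __U is set and __A[i] otherwise. */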
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) {
-  return (__m128i) __builtin_ia32_blendmd_128_mask ((__v4si) __A,
+  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
                 (__v4si) __W,
-                (__mmask8) __U);
+                (__v4si) __A);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) {
-  return (__m256i) __builtin_ia32_blendmd_256_mask ((__v8si) __A,
+  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
                 (__v8si) __W,
-                (__mmask8) __U);
+                (__v8si) __A);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) {
-  return (__m128d) __builtin_ia32_blendmpd_128_mask ((__v2df) __A,
+  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
                  (__v2df) __W,
-                 (__mmask8) __U);
+                 (__v2df) __A);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) {
-  return (__m256d) __builtin_ia32_blendmpd_256_mask ((__v4df) __A,
+  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
                  (__v4df) __W,
-                 (__mmask8) __U);
+                 (__v4df) __A);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) {
-  return (__m128) __builtin_ia32_blendmps_128_mask ((__v4sf) __A,
+  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
                 (__v4sf) __W,
-                (__mmask8) __U);
+                (__v4sf) __A);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) {
-  return (__m256) __builtin_ia32_blendmps_256_mask ((__v8sf) __A,
+  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
                 (__v8sf) __W,
-                (__mmask8) __U);
+                (__v8sf) __A);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) {
-  return (__m128i) __builtin_ia32_blendmq_128_mask ((__v2di) __A,
+  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
                 (__v2di) __W,
-                (__mmask8) __U);
+                (__v2di) __A);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) {
-  return (__m256i) __builtin_ia32_blendmq_256_mask ((__v4di) __A,
+  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
                 (__v4di) __W,
-                (__mmask8) __U);
+                (__v4di) __A);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -3833,61 +3753,79 @@
               __M);
 }
 
-#define _mm_roundscale_pd(__A, __imm) __extension__ ({ \
-  (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, \
-                   __imm, (__v2df) _mm_setzero_pd (), (__mmask8) -1); })
+#define _mm_roundscale_pd(A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+                                              (int)(imm), \
+                                              (__v2df)_mm_setzero_pd(), \
+                                              (__mmask8)-1); })
 
 
-#define _mm_mask_roundscale_pd(__W, __U, __A, __imm) __extension__ ({ \
-  (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, __imm, \
-                   (__v2df) __W, (__mmask8) __U); })
+#define _mm_mask_roundscale_pd(W, U, A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+                                              (int)(imm), \
+                                              (__v2df)(__m128d)(W), \
+                                              (__mmask8)(U)); })
 
 
-#define _mm_maskz_roundscale_pd(__U, __A, __imm) __extension__ ({ \
-  (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, __imm, \
-                   (__v2df) _mm_setzero_pd (), (__mmask8) __U); })
+#define _mm_maskz_roundscale_pd(U, A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+                                              (int)(imm), \
+                                              (__v2df)_mm_setzero_pd(), \
+                                              (__mmask8)(U)); })
 
 
-#define _mm256_roundscale_pd(__A, __imm) __extension__ ({ \
-  (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
-                   (__v4df) _mm256_setzero_pd (), (__mmask8) -1); })
+#define _mm256_roundscale_pd(A, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+                                              (int)(imm), \
+                                              (__v4df)_mm256_setzero_pd(), \
+                                              (__mmask8)-1); })
 
 
-#define _mm256_mask_roundscale_pd(__W, __U, __A, __imm) __extension__ ({ \
-  (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
-                   (__v4df) __W, (__mmask8) __U); })
+#define _mm256_mask_roundscale_pd(W, U, A, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+                                              (int)(imm), \
+                                              (__v4df)(__m256d)(W), \
+                                              (__mmask8)(U)); })
 
 
-#define _mm256_maskz_roundscale_pd(__U, __A, __imm)  __extension__ ({ \
-  (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
-                   (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+#define _mm256_maskz_roundscale_pd(U, A, imm)  __extension__ ({ \
+  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+                                              (int)(imm), \
+                                              (__v4df)_mm256_setzero_pd(), \
+                                              (__mmask8)(U)); })
 
-#define _mm_roundscale_ps(__A, __imm)  __extension__ ({ \
-  (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
-                  (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+#define _mm_roundscale_ps(A, imm)  __extension__ ({ \
+  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                             (__v4sf)_mm_setzero_ps(), \
+                                             (__mmask8)-1); })
 
 
-#define _mm_mask_roundscale_ps(__W, __U, __A, __imm)  __extension__ ({ \
-  (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
-                  (__v4sf) __W, (__mmask8) __U); })
+#define _mm_mask_roundscale_ps(W, U, A, imm)  __extension__ ({ \
+  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                             (__v4sf)(__m128)(W), \
+                                             (__mmask8)(U)); })
 
 
-#define _mm_maskz_roundscale_ps(__U, __A, __imm)  __extension__ ({ \
-  (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
-                  (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+#define _mm_maskz_roundscale_ps(U, A, imm)  __extension__ ({ \
+  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                             (__v4sf)_mm_setzero_ps(), \
+                                             (__mmask8)(U)); })
 
-#define _mm256_roundscale_ps(__A, __imm)  __extension__ ({ \
-  (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A,__imm, \
-                  (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+#define _mm256_roundscale_ps(A, imm)  __extension__ ({ \
+  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                             (__v8sf)_mm256_setzero_ps(), \
+                                             (__mmask8)-1); })
 
-#define _mm256_mask_roundscale_ps(__W, __U, __A,__imm)  __extension__ ({ \
-  (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, __imm, \
-                  (__v8sf) __W, (__mmask8) __U); })
+#define _mm256_mask_roundscale_ps(W, U, A, imm)  __extension__ ({ \
+  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                             (__v8sf)(__m256)(W), \
+                                             (__mmask8)(U)); })
 
 
-#define _mm256_maskz_roundscale_ps(__U, __A, __imm)  __extension__ ({ \
-  (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, __imm, \
-                  (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+#define _mm256_maskz_roundscale_ps(U, A, imm)  __extension__ ({ \
+  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                             (__v8sf)_mm256_setzero_ps(), \
+                                             (__mmask8)(U)); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_scalef_pd (__m128d __A, __m128d __B) {
@@ -3996,153 +3934,165 @@
                (__mmask8) __U);
 }
 
-#define _mm_i64scatter_pd(__addr,__index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv2df(__addr, (__mmask8) 0xFF, (__v2di) __index, \
-                              (__v2df) __v1, __scale); })
+#define _mm_i64scatter_pd(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)-1, \
+                               (__v2di)(__m128i)(index), \
+                               (__v2df)(__m128d)(v1), (int)(scale)); })
 
-#define _mm_mask_i64scatter_pd(__addr, __mask, __index, __v1, \
-                               __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv2df (__addr, __mask, (__v2di) __index, \
-                               (__v2df) __v1, __scale); })
+#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)(mask), \
+                               (__v2di)(__m128i)(index), \
+                               (__v2df)(__m128d)(v1), (int)(scale)); })
 
+#define _mm_i64scatter_epi64(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)-1, \
+                               (__v2di)(__m128i)(index), \
+                               (__v2di)(__m128i)(v1), (int)(scale)); })
 
-#define _mm_i64scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv2di (__addr, (__mmask8) 0xFF, \
-        (__v2di) __index, (__v2di) __v1, __scale); })
+#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)(mask), \
+                               (__v2di)(__m128i)(index), \
+                               (__v2di)(__m128i)(v1), (int)(scale)); })
 
-#define _mm_mask_i64scatter_epi64(__addr, __mask, __index, __v1,\
-                                  __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv2di (__addr, __mask, (__v2di) __index,\
-        (__v2di) __v1, __scale); })
+#define _mm256_i64scatter_pd(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)-1, \
+                               (__v4di)(__m256i)(index), \
+                               (__v4df)(__m256d)(v1), (int)(scale)); })
 
-#define _mm256_i64scatter_pd(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv4df (__addr, (__mmask8) 0xFF,\
-        (__v4di) __index, (__v4df) __v1, __scale); })
+#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)(mask), \
+                               (__v4di)(__m256i)(index), \
+                               (__v4df)(__m256d)(v1), (int)(scale)); })
 
-#define _mm256_mask_i64scatter_pd(__addr, __mask, __index, __v1,\
-                                   __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv4df (__addr, __mask, (__v4di) __index,\
-        (__v4df) __v1, __scale); })
+#define _mm256_i64scatter_epi64(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)-1, \
+                               (__v4di)(__m256i)(index), \
+                               (__v4di)(__m256i)(v1), (int)(scale)); })
 
-#define _mm256_i64scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv4di (__addr, (__mmask8) 0xFF, (__v4di) __index,\
-                               (__v4di) __v1, __scale); })
+#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)(mask), \
+                               (__v4di)(__m256i)(index), \
+                               (__v4di)(__m256i)(v1), (int)(scale)); })
 
-#define _mm256_mask_i64scatter_epi64(__addr, __mask, __index, __v1,\
-                                      __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv4di (__addr, __mask, (__v4di) __index,\
-        (__v4di) __v1, __scale); })
+#define _mm_i64scatter_ps(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)-1, \
+                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
+                               (int)(scale)); })
 
-#define _mm_i64scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv4sf (__addr, (__mmask8) 0xFF,\
-        (__v2di) __index, (__v4sf) __v1, __scale); })
+#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)(mask), \
+                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
+                               (int)(scale)); })
 
-#define _mm_mask_i64scatter_ps(__addr, __mask, __index, __v1, \
-                                __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv4sf (__addr, __mask, (__v2di) __index,\
-        (__v4sf) __v1, __scale); })
+#define _mm_i64scatter_epi32(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)-1, \
+                               (__v2di)(__m128i)(index), \
+                               (__v4si)(__m128i)(v1), (int)(scale)); })
 
-#define _mm_i64scatter_epi32(__addr, __index, __v1, \
-                              __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv4si (__addr, (__mmask8) 0xFF,\
-        (__v2di) __index, (__v4si) __v1, __scale); })
+#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)(mask), \
+                               (__v2di)(__m128i)(index), \
+                               (__v4si)(__m128i)(v1), (int)(scale)); })
 
-#define _mm_mask_i64scatter_epi32(__addr, __mask, __index, __v1,\
-         __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv4si (__addr, __mask, (__v2di) __index,\
-        (__v4si) __v1, __scale); })
+#define _mm256_i64scatter_ps(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)-1, \
+                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
+                               (int)(scale)); })
 
-#define _mm256_i64scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv8sf (__addr, (__mmask8) 0xFF, (__v4di) __index, \
-                              (__v4sf) __v1, __scale); })
+#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)(mask), \
+                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
+                               (int)(scale)); })
 
-#define _mm256_mask_i64scatter_ps(__addr, __mask, __index, __v1, \
-                                   __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv8sf (__addr, __mask, (__v4di) __index, \
-        (__v4sf) __v1, __scale); })
+#define _mm256_i64scatter_epi32(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)-1, \
+                               (__v4di)(__m256i)(index), \
+                               (__v4si)(__m128i)(v1), (int)(scale)); })
 
-#define _mm256_i64scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scatterdiv8si (__addr, (__mmask8) 0xFF, \
-        (__v4di) __index, (__v4si) __v1, __scale); })
+#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({  \
+  __builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)(mask), \
+                               (__v4di)(__m256i)(index), \
+                               (__v4si)(__m128i)(v1), (int)(scale)); })
 
-#define _mm256_mask_i64scatter_epi32(__addr, __mask, __index, __v1, \
-                                      __scale) __extension__ ({  \
-  __builtin_ia32_scatterdiv8si(__addr, __mask, (__v4di) __index, \
-        (__v4si) __v1, __scale); })
+#define _mm_i32scatter_pd(addr, index, v1, scale) __extension__ ({      \
+  __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)-1, \
+                               (__v4si)(__m128i)(index), \
+                               (__v2df)(__m128d)(v1), (int)(scale)); })
 
-#define _mm_i32scatter_pd(__addr, __index, __v1,         \
-                          __scale) __extension__ ({      \
-  __builtin_ia32_scattersiv2df (__addr, (__mmask8) 0xFF, \
-        (__v4si) __index, (__v2df) __v1, __scale); })
+#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({        \
+  __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)(mask), \
+                               (__v4si)(__m128i)(index), \
+                               (__v2df)(__m128d)(v1), (int)(scale)); })
 
-#define _mm_mask_i32scatter_pd(__addr, __mask, __index, __v1,    \
-                                __scale) __extension__ ({        \
-  __builtin_ia32_scattersiv2df (__addr, __mask, (__v4si) __index,\
-         (__v2df) __v1, __scale); })
+#define _mm_i32scatter_epi64(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)-1, \
+                               (__v4si)(__m128i)(index), \
+                               (__v2di)(__m128i)(v1), (int)(scale)); })
 
-#define _mm_i32scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scattersiv2di (__addr, (__mmask8) 0xFF,                       \
-        (__v4si) __index, (__v2di) __v1, __scale); })
+#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)(mask), \
+                               (__v4si)(__m128i)(index), \
+                               (__v2di)(__m128i)(v1), (int)(scale)); })
 
-#define _mm_mask_i32scatter_epi64(__addr, __mask, __index, __v1, \
-         __scale) __extension__ ({                                \
-  __builtin_ia32_scattersiv2di (__addr, __mask, (__v4si) __index, \
-        (__v2di) __v1, __scale); })
+#define _mm256_i32scatter_pd(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)-1, \
+                               (__v4si)(__m128i)(index), \
+                               (__v4df)(__m256d)(v1), (int)(scale)); })
 
-#define _mm256_i32scatter_pd(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scattersiv4df (__addr, (__mmask8) 0xFF,                      \
-        (__v4si) __index, (__v4df) __v1, __scale); })
+#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)(mask), \
+                               (__v4si)(__m128i)(index), \
+                               (__v4df)(__m256d)(v1), (int)(scale)); })
 
-#define _mm256_mask_i32scatter_pd(__addr, __mask, __index, __v1, \
-         __scale) __extension__ ({                                \
-  __builtin_ia32_scattersiv4df (__addr, __mask, (__v4si) __index, \
-        (__v4df) __v1, __scale); })
+#define _mm256_i32scatter_epi64(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)-1, \
+                               (__v4si)(__m128i)(index), \
+                               (__v4di)(__m256i)(v1), (int)(scale)); })
 
-#define _mm256_i32scatter_epi64(__addr, __index, __v1,    \
-                                __scale) __extension__ ({ \
-  __builtin_ia32_scattersiv4di (__addr, (__mmask8) 0xFF,  \
-        (__v4si) __index, (__v4di) __v1, __scale); })
+#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)(mask), \
+                               (__v4si)(__m128i)(index), \
+                               (__v4di)(__m256i)(v1), (int)(scale)); })
 
-#define _mm256_mask_i32scatter_epi64(__addr, __mask, __index, __v1, \
-            __scale) __extension__ ({                               \
-  __builtin_ia32_scattersiv4di (__addr, __mask, (__v4si) __index,   \
-        (__v4di) __v1, __scale); })
+#define _mm_i32scatter_ps(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)-1, \
+                               (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
+                               (int)(scale)); })
 
-#define _mm_i32scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scattersiv4sf (__addr, (__mmask8) 0xFF,                   \
-        (__v4si) __index, (__v4sf) __v1, __scale); })
+#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)(mask), \
+                               (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
+                               (int)(scale)); })
 
-#define _mm_mask_i32scatter_ps(__addr, __mask, __index, __v1,     \
-                               __scale) __extension__ ({          \
-  __builtin_ia32_scattersiv4sf (__addr, __mask, (__v4si) __index, \
-        (__v4sf) __v1, __scale); })
+#define _mm_i32scatter_epi32(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)-1, \
+                               (__v4si)(__m128i)(index), \
+                               (__v4si)(__m128i)(v1), (int)(scale)); })
 
-#define _mm_i32scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scattersiv4si (__addr, (__mmask8) 0xFF,                       \
-        (__v4si) __index, (__v4si) __v1, __scale); })
+#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)(mask), \
+                               (__v4si)(__m128i)(index), \
+                               (__v4si)(__m128i)(v1), (int)(scale)); })
 
-#define _mm_mask_i32scatter_epi32(__addr, __mask, __index, __v1, \
-                                  __scale) __extension__ ({      \
-  __builtin_ia32_scattersiv4si (__addr, __mask, (__v4si) __index,\
-        (__v4si) __v1, __scale); })
+#define _mm256_i32scatter_ps(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)-1, \
+                               (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
+                               (int)(scale)); })
 
-#define _mm256_i32scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scattersiv8sf (__addr, (__mmask8) 0xFF,                      \
-        (__v8si) __index, (__v8sf) __v1, __scale); })
+#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)(mask), \
+                               (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
+                               (int)(scale)); })
 
-#define _mm256_mask_i32scatter_ps(__addr, __mask, __index, __v1, \
-                                   __scale) __extension__ ({     \
-  __builtin_ia32_scattersiv8sf (__addr, __mask, (__v8si) __index,\
-        (__v8sf) __v1, __scale); })
+#define _mm256_i32scatter_epi32(addr, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)-1, \
+                               (__v8si)(__m256i)(index), \
+                               (__v8si)(__m256i)(v1), (int)(scale)); })
 
-#define _mm256_i32scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
-  __builtin_ia32_scattersiv8si (__addr, (__mmask8) 0xFF,                         \
-        (__v8si) __index, (__v8si) __v1, __scale); })
-
-#define _mm256_mask_i32scatter_epi32(__addr, __mask, __index, __v1, \
-            __scale) __extension__ ({                                \
-  __builtin_ia32_scattersiv8si (__addr, __mask, (__v8si) __index,    \
-        (__v8si) __v1, __scale); })
+#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
+  __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)(mask), \
+                               (__v8si)(__m256i)(index), \
+                               (__v8si)(__m256i)(v1), (int)(scale)); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_sqrt_pd (__m128d __W, __mmask8 __U, __m128d __A) {
@@ -4600,7 +4550,4621 @@
               __U);
 }
 
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi8_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbd128_mask ((__v16qi) __A,
+                (__v4si) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbd128_mask ((__v16qi) __A,
+                (__v4si)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbd256_mask ((__v16qi) __A,
+                (__v8si) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbd256_mask ((__v16qi) __A,
+                (__v8si)
+                _mm256_setzero_si256 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi8_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbq128_mask ((__v16qi) __A,
+                (__v2di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbq128_mask ((__v16qi) __A,
+                (__v2di)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi8_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbq256_mask ((__v16qi) __A,
+                (__v4di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbq256_mask ((__v16qi) __A,
+                (__v4di)
+                _mm256_setzero_si256 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_epi64 (__m128i __W, __mmask8 __U, __m128i __X)
+{
+  return (__m128i) __builtin_ia32_pmovsxdq128_mask ((__v4si) __X,
+                (__v2di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_epi64 (__mmask8 __U, __m128i __X)
+{
+  return (__m128i) __builtin_ia32_pmovsxdq128_mask ((__v4si) __X,
+                (__v2di)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_epi64 (__m256i __W, __mmask8 __U, __m128i __X)
+{
+  return (__m256i) __builtin_ia32_pmovsxdq256_mask ((__v4si) __X,
+                (__v4di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_epi64 (__mmask8 __U, __m128i __X)
+{
+  return (__m256i) __builtin_ia32_pmovsxdq256_mask ((__v4si) __X,
+                (__v4di)
+                _mm256_setzero_si256 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi16_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxwd128_mask ((__v8hi) __A,
+                (__v4si) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxwd128_mask ((__v8hi) __A,
+                (__v4si)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi16_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxwd256_mask ((__v8hi) __A,
+                (__v8si) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxwd256_mask ((__v8hi) __A,
+                (__v8si)
+                _mm256_setzero_si256 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi16_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxwq128_mask ((__v8hi) __A,
+                (__v2di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxwq128_mask ((__v8hi) __A,
+                (__v2di)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi16_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxwq256_mask ((__v8hi) __A,
+                (__v4di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxwq256_mask ((__v8hi) __A,
+                (__v4di)
+                _mm256_setzero_si256 (),
+                (__mmask8) __U);
+}
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu8_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbd128_mask ((__v16qi) __A,
+                (__v4si) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu8_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbd128_mask ((__v16qi) __A,
+                (__v4si)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbd256_mask ((__v16qi) __A,
+                (__v8si) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu8_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbd256_mask ((__v16qi) __A,
+                (__v8si)
+                _mm256_setzero_si256 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu8_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbq128_mask ((__v16qi) __A,
+                (__v2di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbq128_mask ((__v16qi) __A,
+                (__v2di)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu8_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbq256_mask ((__v16qi) __A,
+                (__v4di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbq256_mask ((__v16qi) __A,
+                (__v4di)
+                _mm256_setzero_si256 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu32_epi64 (__m128i __W, __mmask8 __U, __m128i __X)
+{
+  return (__m128i) __builtin_ia32_pmovzxdq128_mask ((__v4si) __X,
+                (__v2di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu32_epi64 (__mmask8 __U, __m128i __X)
+{
+  return (__m128i) __builtin_ia32_pmovzxdq128_mask ((__v4si) __X,
+                (__v2di)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu32_epi64 (__m256i __W, __mmask8 __U, __m128i __X)
+{
+  return (__m256i) __builtin_ia32_pmovzxdq256_mask ((__v4si) __X,
+                (__v4di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu32_epi64 (__mmask8 __U, __m128i __X)
+{
+  return (__m256i) __builtin_ia32_pmovzxdq256_mask ((__v4si) __X,
+                (__v4di)
+                _mm256_setzero_si256 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu16_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxwd128_mask ((__v8hi) __A,
+                (__v4si) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu16_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxwd128_mask ((__v8hi) __A,
+                (__v4si)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu16_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxwd256_mask ((__v8hi) __A,
+                (__v8si) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu16_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxwd256_mask ((__v8hi) __A,
+                (__v8si)
+                _mm256_setzero_si256 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu16_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxwq128_mask ((__v8hi) __A,
+                (__v2di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxwq128_mask ((__v8hi) __A,
+                (__v2di)
+                _mm_setzero_si128 (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu16_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxwq256_mask ((__v8hi) __A,
+                (__v4di) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxwq256_mask ((__v8hi) __A,
+                (__v4di)
+                _mm256_setzero_si256 (),
+                (__mmask8) __U);
+}
+
+
+#define _mm_rol_epi32(a, b) __extension__ ({\
+  (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \
+                                        (__v4si)_mm_setzero_si128(), \
+                                        (__mmask8)-1); })
+
+#define _mm_mask_rol_epi32(w, u, a, b) __extension__ ({\
+  (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \
+                                        (__v4si)(__m128i)(w), (__mmask8)(u)); })
+
+#define _mm_maskz_rol_epi32(u, a, b) __extension__ ({\
+  (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \
+                                        (__v4si)_mm_setzero_si128(), \
+                                        (__mmask8)(u)); })
+
+#define _mm256_rol_epi32(a, b) __extension__ ({\
+  (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \
+                                        (__v8si)_mm256_setzero_si256(), \
+                                        (__mmask8)-1); })
+
+#define _mm256_mask_rol_epi32(w, u, a, b) __extension__ ({\
+  (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \
+                                        (__v8si)(__m256i)(w), (__mmask8)(u)); })
+
+#define _mm256_maskz_rol_epi32(u, a, b) __extension__ ({\
+  (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \
+                                        (__v8si)_mm256_setzero_si256(), \
+                                        (__mmask8)(u)); })
+
+#define _mm_rol_epi64(a, b) __extension__ ({\
+  (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \
+                                        (__v2di)_mm_setzero_di(), \
+                                        (__mmask8)-1); })
+
+#define _mm_mask_rol_epi64(w, u, a, b) __extension__ ({\
+  (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \
+                                        (__v2di)(__m128i)(w), (__mmask8)(u)); })
+
+#define _mm_maskz_rol_epi64(u, a, b) __extension__ ({\
+  (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \
+                                        (__v2di)_mm_setzero_di(), \
+                                        (__mmask8)(u)); })
+
+#define _mm256_rol_epi64(a, b) __extension__ ({\
+  (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \
+                                        (__v4di)_mm256_setzero_si256(), \
+                                        (__mmask8)-1); })
+
+#define _mm256_mask_rol_epi64(w, u, a, b) __extension__ ({\
+  (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \
+                                        (__v4di)(__m256i)(w), (__mmask8)(u)); })
+
+#define _mm256_maskz_rol_epi64(u, a, b) __extension__ ({\
+  (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \
+                                        (__v4di)_mm256_setzero_si256(), \
+                                        (__mmask8)(u)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rolv_epi32 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
+              (__v4si) __B,
+              (__v4si)
+              _mm_setzero_si128 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+         __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
+              (__v4si) __B,
+              (__v4si) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
+              (__v4si) __B,
+              (__v4si)
+              _mm_setzero_si128 (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_rolv_epi32 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
+              (__v8si) __B,
+              (__v8si)
+              _mm256_setzero_si256 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
+              (__v8si) __B,
+              (__v8si) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
+              (__v8si) __B,
+              (__v8si)
+              _mm256_setzero_si256 (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rolv_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
+              (__v2di) __B,
+              (__v2di)
+              _mm_setzero_di (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+         __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
+              (__v2di) __B,
+              (__v2di) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
+              (__v2di) __B,
+              (__v2di)
+              _mm_setzero_di (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_rolv_epi64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
+              (__v4di) __B,
+              (__v4di)
+              _mm256_setzero_si256 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
+              (__v4di) __B,
+              (__v4di) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
+              (__v4di) __B,
+              (__v4di)
+              _mm256_setzero_si256 (),
+              (__mmask8) __U);
+}
+
+#define _mm_ror_epi32(A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
+                                        (__v4si)_mm_setzero_si128(), \
+                                        (__mmask8)-1); })
+
+#define _mm_mask_ror_epi32(W, U, A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
+                                        (__v4si)(__m128i)(W), (__mmask8)(U)); })
+
+#define _mm_maskz_ror_epi32(U, A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
+                                        (__v4si)_mm_setzero_si128(), \
+                                        (__mmask8)(U)); })
+
+#define _mm256_ror_epi32(A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \
+                                        (__v8si)_mm256_setzero_si256(), \
+                                        (__mmask8)-1); })
+
+#define _mm256_mask_ror_epi32(W, U, A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \
+                                        (__v8si)(__m256i)(W), (__mmask8)(U)); })
+
+#define _mm256_maskz_ror_epi32(U, A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \
+                                        (__v8si)_mm256_setzero_si256(), \
+                                        (__mmask8)(U)); })
+
+#define _mm_ror_epi64(A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
+                                        (__v2di)_mm_setzero_di(), \
+                                        (__mmask8)-1); })
+
+#define _mm_mask_ror_epi64(W, U, A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
+                                        (__v2di)(__m128i)(W), (__mmask8)(U)); })
+
+#define _mm_maskz_ror_epi64(U, A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
+                                        (__v2di)_mm_setzero_di(), \
+                                        (__mmask8)(U)); })
+
+#define _mm256_ror_epi64(A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \
+                                        (__v4di)_mm256_setzero_si256(), \
+                                        (__mmask8)-1); })
+
+#define _mm256_mask_ror_epi64(W, U, A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \
+                                        (__v4di)(__m256i)(W), (__mmask8)(U)); })
+
+#define _mm256_maskz_ror_epi64(U, A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \
+                                        (__v4di)_mm256_setzero_si256(), \
+                                        (__mmask8)(U)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sll_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pslld128_mask ((__v4si) __A,
+             (__v4si) __B,
+             (__v4si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sll_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pslld128_mask ((__v4si) __A,
+             (__v4si) __B,
+             (__v4si)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sll_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+           __m128i __B)
+{
+  return (__m256i) __builtin_ia32_pslld256_mask ((__v8si) __A,
+             (__v4si) __B,
+             (__v8si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sll_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_pslld256_mask ((__v8si) __A,
+             (__v4si) __B,
+             (__v8si)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+#define _mm_mask_slli_epi32(W, U, A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_pslldi128_mask((__v4si)(__m128i)(A), (int)(B), \
+                                         (__v4si)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_slli_epi32(U, A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_pslldi128_mask((__v4si)(__m128i)(A), (int)(B), \
+                                         (__v4si)_mm_setzero_si128(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_mask_slli_epi32(W, U, A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_pslldi256_mask((__v8si)(__m256i)(A), (int)(B), \
+                                         (__v8si)(__m256i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_maskz_slli_epi32(U, A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_pslldi256_mask((__v8si)(__m256i)(A), (int)(B), \
+                                         (__v8si)_mm256_setzero_si256(), \
+                                         (__mmask8)(U)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sll_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllq128_mask ((__v2di) __A,
+             (__v2di) __B,
+             (__v2di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sll_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllq128_mask ((__v2di) __A,
+             (__v2di) __B,
+             (__v2di)
+             _mm_setzero_di (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sll_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+           __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psllq256_mask ((__v4di) __A,
+             (__v2di) __B,
+             (__v4di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sll_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psllq256_mask ((__v4di) __A,
+             (__v2di) __B,
+             (__v4di)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+#define _mm_mask_slli_epi64(W, U, A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_psllqi128_mask((__v2di)(__m128i)(A), (int)(B), \
+                                         (__v2di)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_slli_epi64(U, A, B) __extension__ ({ \
+  (__m128i)__builtin_ia32_psllqi128_mask((__v2di)(__m128i)(A), (int)(B), \
+                                         (__v2di)_mm_setzero_di(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_mask_slli_epi64(W, U, A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_psllqi256_mask((__v4di)(__m256i)(A), (int)(B), \
+                                         (__v4di)(__m256i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_maskz_slli_epi64(U, A, B) __extension__ ({ \
+  (__m256i)__builtin_ia32_psllqi256_mask((__v4di)(__m256i)(A), (int)(B), \
+                                         (__v4di)_mm256_setzero_si256(), \
+                                         (__mmask8)(U)); })
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rorv_epi32 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
+              (__v4si) __B,
+              (__v4si)
+              _mm_setzero_si128 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+         __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
+              (__v4si) __B,
+              (__v4si) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
+              (__v4si) __B,
+              (__v4si)
+              _mm_setzero_si128 (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_rorv_epi32 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
+              (__v8si) __B,
+              (__v8si)
+              _mm256_setzero_si256 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
+              (__v8si) __B,
+              (__v8si) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
+              (__v8si) __B,
+              (__v8si)
+              _mm256_setzero_si256 (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rorv_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
+              (__v2di) __B,
+              (__v2di)
+              _mm_setzero_di (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+         __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
+              (__v2di) __B,
+              (__v2di) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
+              (__v2di) __B,
+              (__v2di)
+              _mm_setzero_di (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_rorv_epi64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
+              (__v4di) __B,
+              (__v4di)
+              _mm256_setzero_si256 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
+              (__v4di) __B,
+              (__v4di) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
+              (__v4di) __B,
+              (__v4di)
+              _mm256_setzero_si256 (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sllv_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
+         __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psllv2di_mask ((__v2di) __X,
+             (__v2di) __Y,
+             (__v2di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sllv_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psllv2di_mask ((__v2di) __X,
+             (__v2di) __Y,
+             (__v2di)
+             _mm_setzero_di (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sllv_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
+      __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psllv4di_mask ((__v4di) __X,
+             (__v4di) __Y,
+             (__v4di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sllv_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psllv4di_mask ((__v4di) __X,
+             (__v4di) __Y,
+             (__v4di)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sllv_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
+         __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psllv4si_mask ((__v4si) __X,
+             (__v4si) __Y,
+             (__v4si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sllv_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psllv4si_mask ((__v4si) __X,
+             (__v4si) __Y,
+             (__v4si)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sllv_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
+      __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psllv8si_mask ((__v8si) __X,
+             (__v8si) __Y,
+             (__v8si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sllv_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psllv8si_mask ((__v8si) __X,
+             (__v8si) __Y,
+             (__v8si)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srlv_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
+         __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrlv2di_mask ((__v2di) __X,
+             (__v2di) __Y,
+             (__v2di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srlv_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrlv2di_mask ((__v2di) __X,
+             (__v2di) __Y,
+             (__v2di)
+             _mm_setzero_di (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srlv_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
+      __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrlv4di_mask ((__v4di) __X,
+             (__v4di) __Y,
+             (__v4di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srlv_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrlv4di_mask ((__v4di) __X,
+             (__v4di) __Y,
+             (__v4di)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srlv_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
+         __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrlv4si_mask ((__v4si) __X,
+             (__v4si) __Y,
+             (__v4si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srlv_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrlv4si_mask ((__v4si) __X,
+             (__v4si) __Y,
+             (__v4si)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srlv_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
+      __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrlv8si_mask ((__v8si) __X,
+             (__v8si) __Y,
+             (__v8si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srlv_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrlv8si_mask ((__v8si) __X,
+             (__v8si) __Y,
+             (__v8si)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srl_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrld128_mask ((__v4si) __A,
+             (__v4si) __B,
+             (__v4si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srl_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrld128_mask ((__v4si) __A,
+             (__v4si) __B,
+             (__v4si)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srl_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+           __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrld256_mask ((__v8si) __A,
+             (__v4si) __B,
+             (__v8si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srl_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrld256_mask ((__v8si) __A,
+             (__v4si) __B,
+             (__v8si)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+#define _mm_mask_srli_epi32(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psrldi128_mask((__v4si)(__m128i)(A), (int)(imm), \
+                                         (__v4si)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_srli_epi32(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psrldi128_mask((__v4si)(__m128i)(A), (int)(imm), \
+                                         (__v4si)_mm_setzero_si128(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_mask_srli_epi32(W, U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psrldi256_mask((__v8si)(__m256i)(A), (int)(imm), \
+                                         (__v8si)(__m256i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_maskz_srli_epi32(U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psrldi256_mask((__v8si)(__m256i)(A), (int)(imm), \
+                                         (__v8si)_mm256_setzero_si256(), \
+                                         (__mmask8)(U)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srl_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlq128_mask ((__v2di) __A,
+             (__v2di) __B,
+             (__v2di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srl_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlq128_mask ((__v2di) __A,
+             (__v2di) __B,
+             (__v2di)
+             _mm_setzero_di (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srl_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+           __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrlq256_mask ((__v4di) __A,
+             (__v2di) __B,
+             (__v4di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srl_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrlq256_mask ((__v4di) __A,
+             (__v2di) __B,
+             (__v4di)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+#define _mm_mask_srli_epi64(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psrlqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
+                                         (__v2di)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_srli_epi64(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psrlqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
+                                         (__v2di)_mm_setzero_si128(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_mask_srli_epi64(W, U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psrlqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
+                                         (__v4di)(__m256i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_maskz_srli_epi64(U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psrlqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
+                                         (__v4di)_mm256_setzero_si256(), \
+                                         (__mmask8)(U)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srav_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
+         __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrav4si_mask ((__v4si) __X,
+             (__v4si) __Y,
+             (__v4si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srav_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrav4si_mask ((__v4si) __X,
+             (__v4si) __Y,
+             (__v4si)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srav_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
+      __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrav8si_mask ((__v8si) __X,
+             (__v8si) __Y,
+             (__v8si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srav_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrav8si_mask ((__v8si) __X,
+             (__v8si) __Y,
+             (__v8si)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srav_epi64 (__m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
+              (__v2di) __Y,
+              (__v2di)
+              _mm_setzero_di (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srav_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
+         __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
+              (__v2di) __Y,
+              (__v2di) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srav_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
+              (__v2di) __Y,
+              (__v2di)
+              _mm_setzero_di (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srav_epi64 (__m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
+              (__v4di) __Y,
+              (__v4di)
+              _mm256_setzero_si256 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srav_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
+      __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
+              (__v4di) __Y,
+              (__v4di) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
+              (__v4di) __Y,
+              (__v4di)
+              _mm256_setzero_si256 (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
+                 (__v4si) __A,
+                 (__v4si) __W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
+                 (__v4si) __A,
+                 (__v4si) _mm_setzero_si128 ());
+}
+
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
+                 (__v8si) __A,
+                 (__v8si) __W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
+                 (__v8si) __A,
+                 (__v8si) _mm256_setzero_si256 ());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
+              (__v4si) __W,
+              (__mmask8)
+              __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
+              (__v4si)
+              _mm_setzero_si128 (),
+              (__mmask8)
+              __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
+              (__v8si) __W,
+              (__mmask8)
+              __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
+              (__v8si)
+              _mm256_setzero_si256 (),
+              (__mmask8)
+              __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_movdqa32store128_mask ((__v4si *) __P,
+          (__v4si) __A,
+          (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_movdqa32store256_mask ((__v8si *) __P,
+          (__v8si) __A,
+          (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
+                 (__v2di) __A,
+                 (__v2di) __W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
+                 (__v2di) __A,
+                 (__v2di) _mm_setzero_di ());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
+                 (__v4di) __A,
+                 (__v4di) __W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
+                 (__v4di) __A,
+                 (__v4di) _mm256_setzero_si256 ());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
+              (__v2di) __W,
+              (__mmask8)
+              __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
+              (__v2di)
+              _mm_setzero_di (),
+              (__mmask8)
+              __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
+              (__v4di) __W,
+              (__mmask8)
+              __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
+              (__v4di)
+              _mm256_setzero_si256 (),
+              (__mmask8)
+              __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_movdqa64store128_mask ((__v2di *) __P,
+          (__v2di) __A,
+          (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_movdqa64store256_mask ((__v4di *) __P,
+          (__v4di) __A,
+          (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+                                              (__v2df)_mm_movedup_pd(__A),
+                                              (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_movedup_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+                                              (__v2df)_mm_movedup_pd(__A),
+                                              (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+                                              (__v4df)_mm256_movedup_pd(__A),
+                                              (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+                                              (__v4df)_mm256_movedup_pd(__A),
+                                              (__v4df)_mm256_setzero_pd());
+}
+
+
+#define _mm_mask_set1_epi32(O, M, A) __extension__ ({ \
+  (__m128i)__builtin_ia32_pbroadcastd128_gpr_mask((int)(A), \
+                                                  (__v4si)(__m128i)(O), \
+                                                  (__mmask8)(M)); })
+
+#define _mm_maskz_set1_epi32(M, A) __extension__ ({ \
+  (__m128i)__builtin_ia32_pbroadcastd128_gpr_mask((int)(A), \
+                                                  (__v4si)_mm_setzero_si128(), \
+                                                  (__mmask8)(M)); })
+
+#define _mm256_mask_set1_epi32(O, M, A) __extension__ ({ \
+  (__m256i)__builtin_ia32_pbroadcastd256_gpr_mask((int)(A), \
+                                                  (__v8si)(__m256i)(O), \
+                                                  (__mmask8)(M)); })
+
+#define _mm256_maskz_set1_epi32(M, A) __extension__ ({ \
+  (__m256i)__builtin_ia32_pbroadcastd256_gpr_mask((int)(A), \
+                                                  (__v8si)_mm256_setzero_si256(), \
+                                                  (__mmask8)(M)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastq128_gpr_mask (__A, (__v2di) __O,
+                 __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_set1_epi64 (__mmask8 __M, long long __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastq128_gpr_mask (__A,
+                 (__v2di)
+                 _mm_setzero_si128 (),
+                 __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastq256_gpr_mask (__A, (__v4di) __O,
+                 __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastq256_gpr_mask (__A,
+                 (__v4di)
+                 _mm256_setzero_si256 (),
+                 __M);
+}
+
+#define _mm_fixupimm_pd(A, B, C, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2di)(__m128i)(C), (int)(imm), \
+                                             (__mmask8)-1); })
+
+#define _mm_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2di)(__m128i)(C), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v2di)(__m128i)(C), \
+                                              (int)(imm), (__mmask8)(U)); })
+
+#define _mm256_fixupimm_pd(A, B, C, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
+                                             (__v4df)(__m256d)(B), \
+                                             (__v4di)(__m256i)(C), (int)(imm), \
+                                             (__mmask8)-1); })
+
+#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
+                                             (__v4df)(__m256d)(B), \
+                                             (__v4di)(__m256i)(C), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
+                                              (__v4df)(__m256d)(B), \
+                                              (__v4di)(__m256i)(C), \
+                                              (int)(imm), (__mmask8)(U)); })
+
+#define _mm_fixupimm_ps(A, B, C, imm) __extension__ ({ \
+  (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4si)(__m128i)(C), (int)(imm), \
+                                            (__mmask8)-1); })
+
+#define _mm_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \
+  (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4si)(__m128i)(C), (int)(imm), \
+                                            (__mmask8)(U)); })
+
+#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \
+  (__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
+                                             (__v4sf)(__m128)(B), \
+                                             (__v4si)(__m128i)(C), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm256_fixupimm_ps(A, B, C, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
+                                            (__v8sf)(__m256)(B), \
+                                            (__v8si)(__m256i)(C), (int)(imm), \
+                                            (__mmask8)-1); })
+
+#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
+                                            (__v8sf)(__m256)(B), \
+                                            (__v8si)(__m256i)(C), (int)(imm), \
+                                            (__mmask8)(U)); })
+
+#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
+                                             (__v8sf)(__m256)(B), \
+                                             (__v8si)(__m256i)(C), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
+               (__v2df) __W,
+               (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_load_pd (__mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
+               (__v2df)
+               _mm_setzero_pd (),
+               (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
+               (__v4df) __W,
+               (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_load_pd (__mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
+               (__v4df)
+               _mm256_setzero_pd (),
+               (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
+              (__v4sf) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_load_ps (__mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
+              (__v4sf)
+              _mm_setzero_ps (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
+              (__v8sf) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_load_ps (__mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
+              (__v8sf)
+              _mm256_setzero_ps (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
+                 (__v2di) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
+                 (__v2di)
+                 _mm_setzero_si128 (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
+                 (__v4di) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
+                 (__v4di)
+                 _mm256_setzero_si256 (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
+                 (__v4si) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
+                 (__v4si)
+                 _mm_setzero_si128 (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
+                 (__v8si) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
+                 (__v8si)
+                 _mm256_setzero_si256 (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
+               (__v2df) __W,
+               (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_pd (__mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
+               (__v2df)
+               _mm_setzero_pd (),
+               (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
+               (__v4df) __W,
+               (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
+               (__v4df)
+               _mm256_setzero_pd (),
+               (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
+              (__v4sf) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_ps (__mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
+              (__v4sf)
+              _mm_setzero_ps (),
+              (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
+              (__v8sf) __W,
+              (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
+              (__v8sf)
+              _mm256_setzero_ps (),
+              (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A)
+{
+  __builtin_ia32_storeapd128_mask ((__v2df *) __P,
+           (__v2df) __A,
+           (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A)
+{
+  __builtin_ia32_storeapd256_mask ((__v4df *) __P,
+           (__v4df) __A,
+           (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A)
+{
+  __builtin_ia32_storeaps128_mask ((__v4sf *) __P,
+           (__v4sf) __A,
+           (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A)
+{
+  __builtin_ia32_storeaps256_mask ((__v8sf *) __P,
+           (__v8sf) __A,
+           (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_storedqudi128_mask ((__v2di *) __P,
+             (__v2di) __A,
+             (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_storedqudi256_mask ((__v4di *) __P,
+             (__v4di) __A,
+             (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_storedqusi128_mask ((__v4si *) __P,
+             (__v4si) __A,
+             (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_storedqusi256_mask ((__v8si *) __P,
+             (__v8si) __A,
+             (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A)
+{
+  __builtin_ia32_storeupd128_mask ((__v2df *) __P,
+           (__v2df) __A,
+           (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A)
+{
+  __builtin_ia32_storeupd256_mask ((__v4df *) __P,
+           (__v4df) __A,
+           (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A)
+{
+  __builtin_ia32_storeups128_mask ((__v4sf *) __P,
+           (__v4sf) __A,
+           (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A)
+{
+  __builtin_ia32_storeups256_mask ((__v8sf *) __P,
+           (__v8sf) __A,
+           (__mmask8) __U);
+}
+
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+                                              (__v2df)_mm_unpackhi_pd(__A, __B),
+                                              (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+                                              (__v2df)_mm_unpackhi_pd(__A, __B),
+                                              (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+                                           (__v4df)_mm256_unpackhi_pd(__A, __B),
+                                           (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+                                           (__v4df)_mm256_unpackhi_pd(__A, __B),
+                                           (__v4df)_mm256_setzero_pd());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+                                             (__v4sf)_mm_unpackhi_ps(__A, __B),
+                                             (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+                                             (__v4sf)_mm_unpackhi_ps(__A, __B),
+                                             (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                           (__v8sf)_mm256_unpackhi_ps(__A, __B),
+                                           (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                           (__v8sf)_mm256_unpackhi_ps(__A, __B),
+                                           (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+                                              (__v2df)_mm_unpacklo_pd(__A, __B),
+                                              (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+                                              (__v2df)_mm_unpacklo_pd(__A, __B),
+                                              (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+                                           (__v4df)_mm256_unpacklo_pd(__A, __B),
+                                           (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+                                           (__v4df)_mm256_unpacklo_pd(__A, __B),
+                                           (__v4df)_mm256_setzero_pd());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+                                             (__v4sf)_mm_unpacklo_ps(__A, __B),
+                                             (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+                                             (__v4sf)_mm_unpacklo_ps(__A, __B),
+                                             (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                           (__v8sf)_mm256_unpacklo_ps(__A, __B),
+                                           (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                           (__v8sf)_mm256_unpacklo_ps(__A, __B),
+                                           (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_rcp14_pd (__m128d __A)
+{
+  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+                (__v2df)
+                _mm_setzero_pd (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+                (__v2df) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+                (__v2df)
+                _mm_setzero_pd (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_rcp14_pd (__m256d __A)
+{
+  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+                (__v4df)
+                _mm256_setzero_pd (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+                (__v4df) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+                (__v4df)
+                _mm256_setzero_pd (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp14_ps (__m128 __A)
+{
+  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+               (__v4sf)
+               _mm_setzero_ps (),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+               (__v4sf) __W,
+               (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+               (__v4sf)
+               _mm_setzero_ps (),
+               (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_rcp14_ps (__m256 __A)
+{
+  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+               (__v8sf)
+               _mm256_setzero_ps (),
+               (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+               (__v8sf) __W,
+               (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+               (__v8sf)
+               _mm256_setzero_ps (),
+               (__mmask8) __U);
+}
+
+#define _mm_mask_permute_pd(W, U, X, C) __extension__ ({ \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                       (__v2df)_mm_permute_pd((X), (C)), \
+                                       (__v2df)(__m128d)(W)); })
+
+#define _mm_maskz_permute_pd(U, X, C) __extension__ ({ \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                       (__v2df)_mm_permute_pd((X), (C)), \
+                                       (__v2df)_mm_setzero_pd()); })
+
+#define _mm256_mask_permute_pd(W, U, X, C) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_permute_pd((X), (C)), \
+                                       (__v4df)(__m256d)(W)); })
+
+#define _mm256_maskz_permute_pd(U, X, C) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_permute_pd((X), (C)), \
+                                       (__v4df)_mm256_setzero_pd()); })
+
+#define _mm_mask_permute_ps(W, U, X, C) __extension__ ({ \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                      (__v4sf)_mm_permute_ps((X), (C)), \
+                                      (__v4sf)(__m128)(W)); })
+
+#define _mm_maskz_permute_ps(U, X, C) __extension__ ({ \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                      (__v4sf)_mm_permute_ps((X), (C)), \
+                                      (__v4sf)_mm_setzero_ps()); })
+
+#define _mm256_mask_permute_ps(W, U, X, C) __extension__ ({ \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                      (__v8sf)_mm256_permute_ps((X), (C)), \
+                                      (__v8sf)(__m256)(W)); })
+
+#define _mm256_maskz_permute_ps(U, X, C) __extension__ ({ \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                      (__v8sf)_mm256_permute_ps((X), (C)), \
+                                      (__v8sf)_mm256_setzero_ps()); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_permutevar_pd (__m128d __W, __mmask8 __U, __m128d __A,
+      __m128i __C)
+{
+  return (__m128d) __builtin_ia32_vpermilvarpd_mask ((__v2df) __A,
+                 (__v2di) __C,
+                 (__v2df) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_permutevar_pd (__mmask8 __U, __m128d __A, __m128i __C)
+{
+  return (__m128d) __builtin_ia32_vpermilvarpd_mask ((__v2df) __A,
+                 (__v2di) __C,
+                 (__v2df)
+                 _mm_setzero_pd (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_permutevar_pd (__m256d __W, __mmask8 __U, __m256d __A,
+         __m256i __C)
+{
+  return (__m256d) __builtin_ia32_vpermilvarpd256_mask ((__v4df) __A,
+              (__v4di) __C,
+              (__v4df) __W,
+              (__mmask8)
+              __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_permutevar_pd (__mmask8 __U, __m256d __A, __m256i __C)
+{
+  return (__m256d) __builtin_ia32_vpermilvarpd256_mask ((__v4df) __A,
+              (__v4di) __C,
+              (__v4df)
+              _mm256_setzero_pd (),
+              (__mmask8)
+              __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_permutevar_ps (__m128 __W, __mmask8 __U, __m128 __A,
+      __m128i __C)
+{
+  return (__m128) __builtin_ia32_vpermilvarps_mask ((__v4sf) __A,
+                (__v4si) __C,
+                (__v4sf) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_permutevar_ps (__mmask8 __U, __m128 __A, __m128i __C)
+{
+  return (__m128) __builtin_ia32_vpermilvarps_mask ((__v4sf) __A,
+                (__v4si) __C,
+                (__v4sf)
+                _mm_setzero_ps (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_permutevar_ps (__m256 __W, __mmask8 __U, __m256 __A,
+         __m256i __C)
+{
+  return (__m256) __builtin_ia32_vpermilvarps256_mask ((__v8sf) __A,
+                   (__v8si) __C,
+                   (__v8sf) __W,
+                   (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_permutevar_ps (__mmask8 __U, __m256 __A, __m256i __C)
+{
+  return (__m256) __builtin_ia32_vpermilvarps256_mask ((__v8sf) __A,
+                   (__v8si) __C,
+                   (__v8sf)
+                   _mm256_setzero_ps (),
+                   (__mmask8) __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_test_epi32_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmd128 ((__v4si) __A,
+                 (__v4si) __B,
+                 (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_test_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmd128 ((__v4si) __A,
+                 (__v4si) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_test_epi32_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmd256 ((__v8si) __A,
+                 (__v8si) __B,
+                 (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_test_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmd256 ((__v8si) __A,
+                 (__v8si) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_test_epi64_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmq128 ((__v2di) __A,
+                 (__v2di) __B,
+                 (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_test_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmq128 ((__v2di) __A,
+                 (__v2di) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_test_epi64_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmq256 ((__v4di) __A,
+                 (__v4di) __B,
+                 (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_test_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmq256 ((__v4di) __A,
+                 (__v4di) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_testn_epi32_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmd128 ((__v4si) __A,
+            (__v4si) __B,
+            (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_testn_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmd128 ((__v4si) __A,
+            (__v4si) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_testn_epi32_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmd256 ((__v8si) __A,
+            (__v8si) __B,
+            (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_testn_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmd256 ((__v8si) __A,
+            (__v8si) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_testn_epi64_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmq128 ((__v2di) __A,
+            (__v2di) __B,
+            (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_testn_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmq128 ((__v2di) __A,
+            (__v2di) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_testn_epi64_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmq256 ((__v4di) __A,
+            (__v4di) __B,
+            (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_testn_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmq256 ((__v4di) __A,
+            (__v4di) __B, __U);
+}
+
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+                                           (__v4si)_mm_unpackhi_epi32(__A, __B),
+                                           (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+                                           (__v4si)_mm_unpackhi_epi32(__A, __B),
+                                           (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+                                        (__v8si)_mm256_unpackhi_epi32(__A, __B),
+                                        (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+                                        (__v8si)_mm256_unpackhi_epi32(__A, __B),
+                                        (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                           (__v2di)_mm_unpackhi_epi64(__A, __B),
+                                           (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                           (__v2di)_mm_unpackhi_epi64(__A, __B),
+                                           (__v2di)_mm_setzero_di());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                        (__v4di)_mm256_unpackhi_epi64(__A, __B),
+                                        (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                        (__v4di)_mm256_unpackhi_epi64(__A, __B),
+                                        (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+                                           (__v4si)_mm_unpacklo_epi32(__A, __B),
+                                           (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+                                           (__v4si)_mm_unpacklo_epi32(__A, __B),
+                                           (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+                                        (__v8si)_mm256_unpacklo_epi32(__A, __B),
+                                        (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+                                        (__v8si)_mm256_unpacklo_epi32(__A, __B),
+                                        (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                           (__v2di)_mm_unpacklo_epi64(__A, __B),
+                                           (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                           (__v2di)_mm_unpacklo_epi64(__A, __B),
+                                           (__v2di)_mm_setzero_di());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                        (__v4di)_mm256_unpacklo_epi64(__A, __B),
+                                        (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                        (__v4di)_mm256_unpacklo_epi64(__A, __B),
+                                        (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sra_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrad128_mask ((__v4si) __A,
+             (__v4si) __B,
+             (__v4si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sra_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrad128_mask ((__v4si) __A,
+             (__v4si) __B,
+             (__v4si)
+             _mm_setzero_si128 (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sra_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+           __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrad256_mask ((__v8si) __A,
+             (__v4si) __B,
+             (__v8si) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sra_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrad256_mask ((__v8si) __A,
+             (__v4si) __B,
+             (__v8si)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+#define _mm_mask_srai_epi32(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psradi128_mask((__v4si)(__m128i)(A), (int)(imm), \
+                                         (__v4si)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_srai_epi32(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psradi128_mask((__v4si)(__m128i)(A), (int)(imm), \
+                                         (__v4si)_mm_setzero_si128(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_mask_srai_epi32(W, U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psradi256_mask((__v8si)(__m256i)(A), (int)(imm), \
+                                         (__v8si)(__m256i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_maskz_srai_epi32(U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psradi256_mask((__v8si)(__m256i)(A), (int)(imm), \
+                                         (__v8si)_mm256_setzero_si256(), \
+                                         (__mmask8)(U)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sra_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
+             (__v2di) __B,
+             (__v2di)
+             _mm_setzero_di (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sra_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
+             (__v2di) __B,
+             (__v2di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sra_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
+             (__v2di) __B,
+             (__v2di)
+             _mm_setzero_di (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sra_epi64 (__m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
+             (__v2di) __B,
+             (__v4di)
+             _mm256_setzero_si256 (),
+             (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sra_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+           __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
+             (__v2di) __B,
+             (__v4di) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sra_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
+             (__v2di) __B,
+             (__v4di)
+             _mm256_setzero_si256 (),
+             (__mmask8) __U);
+}
+
+#define _mm_srai_epi64(A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
+                                         (__v2di)_mm_setzero_di(), \
+                                         (__mmask8)-1); })
+
+#define _mm_mask_srai_epi64(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
+                                         (__v2di)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_srai_epi64(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
+                                         (__v2di)_mm_setzero_si128(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_srai_epi64(A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
+                                         (__v4di)_mm256_setzero_si256(), \
+                                         (__mmask8)-1); })
+
+#define _mm256_mask_srai_epi64(W, U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
+                                         (__v4di)(__m256i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_maskz_srai_epi64(U, A, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
+                                         (__v4di)_mm256_setzero_si256(), \
+                                         (__mmask8)(U)); })
+
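+/* Bitwise ternary logic (VPTERNLOGD/VPTERNLOGQ): at every bit position the
+ * 8-bit immediate acts as a truth table over the three source operands. */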
+#define _mm_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
+                                            (__v4si)(__m128i)(B), \
+                                            (__v4si)(__m128i)(C), (int)(imm), \
+                                            (__mmask8)-1); })
+
+#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
+                                            (__v4si)(__m128i)(B), \
+                                            (__v4si)(__m128i)(C), (int)(imm), \
+                                            (__mmask8)(U)); })
+
+#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
+                                             (__v4si)(__m128i)(B), \
+                                             (__v4si)(__m128i)(C), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm256_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
+                                            (__v8si)(__m256i)(B), \
+                                            (__v8si)(__m256i)(C), (int)(imm), \
+                                            (__mmask8)-1); })
+
+#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
+                                            (__v8si)(__m256i)(B), \
+                                            (__v8si)(__m256i)(C), (int)(imm), \
+                                            (__mmask8)(U)); })
+
+#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
+                                             (__v8si)(__m256i)(B), \
+                                             (__v8si)(__m256i)(C), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
+                                            (__v2di)(__m128i)(B), \
+                                            (__v2di)(__m128i)(C), (int)(imm), \
+                                            (__mmask8)-1); })
+
+#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
+                                            (__v2di)(__m128i)(B), \
+                                            (__v2di)(__m128i)(C), (int)(imm), \
+                                            (__mmask8)(U)); })
+
+#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
+                                             (__v2di)(__m128i)(B), \
+                                             (__v2di)(__m128i)(C), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm256_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
+                                            (__v4di)(__m256i)(B), \
+                                            (__v4di)(__m256i)(C), (int)(imm), \
+                                            (__mmask8)-1); })
+
+#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
+                                            (__v4di)(__m256i)(B), \
+                                            (__v4di)(__m256i)(C), (int)(imm), \
+                                            (__mmask8)(U)); })
+
+#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
+                                             (__v4di)(__m256i)(B), \
+                                             (__v4di)(__m256i)(C), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+
+
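+/* Shuffles of whole 128-bit lanes (VSHUFF32X4/VSHUFF64X2/VSHUFI32X4/VSHUFI64X2),
+ * plus masked forms of the classic element shuffles built on vector select. */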
+#define _mm256_shuffle_f32x4(A, B, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)(__m256)(A), \
+                                             (__v8sf)(__m256)(B), (int)(imm), \
+                                             (__v8sf)_mm256_setzero_ps(), \
+                                             (__mmask8)-1); })
+
+#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)(__m256)(A), \
+                                             (__v8sf)(__m256)(B), (int)(imm), \
+                                             (__v8sf)(__m256)(W), \
+                                             (__mmask8)(U)); })
+
+#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)(__m256)(A), \
+                                             (__v8sf)(__m256)(B), (int)(imm), \
+                                             (__v8sf)_mm256_setzero_ps(), \
+                                             (__mmask8)(U)); })
+
+#define _mm256_shuffle_f64x2(A, B, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)(__m256d)(A), \
+                                              (__v4df)(__m256d)(B), \
+                                              (int)(imm), \
+                                              (__v4df)_mm256_setzero_pd(), \
+                                              (__mmask8)-1); })
+
+#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)(__m256d)(A), \
+                                              (__v4df)(__m256d)(B), \
+                                              (int)(imm), \
+                                              (__v4df)(__m256d)(W), \
+                                              (__mmask8)(U)); })
+
+#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)(__m256d)(A), \
+                                              (__v4df)(__m256d)(B), \
+                                              (int)(imm), \
+                                              (__v4df)_mm256_setzero_pd(), \
+                                              (__mmask8)(U)); })
+
+#define _mm256_shuffle_i32x4(A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)(__m256i)(A), \
+                                              (__v8si)(__m256i)(B), \
+                                              (int)(imm), \
+                                              (__v8si)_mm256_setzero_si256(), \
+                                              (__mmask8)-1); })
+
+#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)(__m256i)(A), \
+                                              (__v8si)(__m256i)(B), \
+                                              (int)(imm), \
+                                              (__v8si)(__m256i)(W), \
+                                              (__mmask8)(U)); })
+
+#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)(__m256i)(A), \
+                                              (__v8si)(__m256i)(B), \
+                                              (int)(imm), \
+                                              (__v8si)_mm256_setzero_si256(), \
+                                              (__mmask8)(U)); })
+
+#define _mm256_shuffle_i64x2(A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)(__m256i)(A), \
+                                              (__v4di)(__m256i)(B), \
+                                              (int)(imm), \
+                                              (__v4di)_mm256_setzero_si256(), \
+                                              (__mmask8)-1); })
+
+#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)(__m256i)(A), \
+                                              (__v4di)(__m256i)(B), \
+                                              (int)(imm), \
+                                              (__v4di)(__m256i)(W), \
+                                              (__mmask8)(U)); })
+
+#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)(__m256i)(A), \
+                                              (__v4di)(__m256i)(B), \
+                                              (int)(imm), \
+                                              (__v4di)_mm256_setzero_si256(), \
+                                              (__mmask8)(U)); })
+
+#define _mm_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+                                       (__v2df)(__m128d)(W)); })
+
+#define _mm_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+                                       (__v2df)_mm_setzero_pd()); })
+
+#define _mm256_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+                                       (__v4df)(__m256d)(W)); })
+
+#define _mm256_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+                                       (__v4df)_mm256_setzero_pd()); })
+
+#define _mm_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+                                      (__v4sf)(__m128)(W)); })
+
+#define _mm_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+                                      (__v4sf)_mm_setzero_ps()); })
+
+#define _mm256_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+                                      (__v8sf)(__m256)(W)); })
+
+#define _mm256_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+                                      (__v8sf)_mm256_setzero_ps()); })
+
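+/* Approximate reciprocal square root (VRSQRT14PD/PS), relative error < 2^-14. */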
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_rsqrt14_pd (__m128d __A)
+{
+  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+                 (__v2df)
+                 _mm_setzero_pd (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+                 (__v2df) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+                 (__v2df)
+                 _mm_setzero_pd (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_rsqrt14_pd (__m256d __A)
+{
+  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+                 (__v4df)
+                 _mm256_setzero_pd (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+                 (__v4df) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+                 (__v4df)
+                 _mm256_setzero_pd (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt14_ps (__m128 __A)
+{
+  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+                (__v4sf)
+                _mm_setzero_ps (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+                (__v4sf) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+                (__v4sf)
+                _mm_setzero_ps (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_rsqrt14_ps (__m256 __A)
+{
+  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+                (__v8sf)
+                _mm256_setzero_ps (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+                (__v8sf) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+                (__v8sf)
+                _mm256_setzero_ps (),
+                (__mmask8) __U);
+}
+
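+/* Broadcasts: replicate a 128-bit block or a single element across the
+ * destination, with merge-masking and zero-masking variants. */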
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcast_f32x4 (__m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A,
+                (__v8sf)_mm256_undefined_ps (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_f32x4 (__m256 __O, __mmask8 __M, __m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A,
+                (__v8sf) __O,
+                __M);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A,
+                (__v8sf) _mm256_setzero_ps (),
+                __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcast_i32x4 (__m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) __A,
+                 (__v8si)_mm256_undefined_si256 (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_i32x4 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) __A,
+                 (__v8si)
+                 __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_i32x4 (__mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si)
+                 __A,
+                 (__v8si) _mm256_setzero_si256 (),
+                 __M);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastsd_pd (__m256d __O, __mmask8 __M, __m128d __A)
+{
+  return (__m256d)__builtin_ia32_selectpd_256(__M,
+                                              (__v4df) _mm256_broadcastsd_pd(__A),
+                                              (__v4df) __O);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
+{
+  return (__m256d)__builtin_ia32_selectpd_256(__M,
+                                              (__v4df) _mm256_broadcastsd_pd(__A),
+                                              (__v4df) _mm256_setzero_pd());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A)
+{
+  return (__m128)__builtin_ia32_selectps_128(__M,
+                                             (__v4sf) _mm_broadcastss_ps(__A),
+                                             (__v4sf) __O);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
+{
+  return (__m128)__builtin_ia32_selectps_128(__M,
+                                             (__v4sf) _mm_broadcastss_ps(__A),
+                                             (__v4sf) _mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A)
+{
+  return (__m256)__builtin_ia32_selectps_256(__M,
+                                             (__v8sf) _mm256_broadcastss_ps(__A),
+                                             (__v8sf) __O);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
+{
+  return (__m256)__builtin_ia32_selectps_256(__M,
+                                             (__v8sf) _mm256_broadcastss_ps(__A),
+                                             (__v8sf) _mm256_setzero_ps());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectd_128(__M,
+                                             (__v4si) _mm_broadcastd_epi32(__A),
+                                             (__v4si) __O);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectd_128(__M,
+                                             (__v4si) _mm_broadcastd_epi32(__A),
+                                             (__v4si) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i)__builtin_ia32_selectd_256(__M,
+                                             (__v8si) _mm256_broadcastd_epi32(__A),
+                                             (__v8si) __O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
+{
+  return (__m256i)__builtin_ia32_selectd_256(__M,
+                                             (__v8si) _mm256_broadcastd_epi32(__A),
+                                             (__v8si) _mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectq_128(__M,
+                                             (__v2di) _mm_broadcastq_epi64(__A),
+                                             (__v2di) __O);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectq_128(__M,
+                                             (__v2di) _mm_broadcastq_epi64(__A),
+                                             (__v2di) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i)__builtin_ia32_selectq_256(__M,
+                                             (__v4di) _mm256_broadcastq_epi64(__A),
+                                             (__v4di) __O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
+{
+  return (__m256i)__builtin_ia32_selectq_256(__M,
+                                             (__v4di) _mm256_broadcastq_epi64(__A),
+                                             (__v4di) _mm256_setzero_si256());
+}
+
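+/* Down-conversions with signed saturation (VPMOVS*), including variants that
+ * store the narrowed result directly to memory under a write mask. */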
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi32_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
+               (__v16qi)_mm_undefined_si128(),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
+               (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
+               (__v16qi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi32_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
+               (__v16qi)_mm_undefined_si128(),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
+               (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
+               (__v16qi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi32_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
+               (__v8hi)_mm_setzero_si128 (),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
+               (__v8hi)__O,
+               __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
+               (__v8hi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi32_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
+               (__v8hi)_mm_undefined_si128(),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
+               (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
+               (__v8hi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi64_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
+               (__v16qi)_mm_undefined_si128(),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
+               (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
+               (__v16qi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi64_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
+               (__v16qi)_mm_undefined_si128(),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
+               (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
+               (__v16qi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi64_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
+               (__v4si)_mm_undefined_si128(),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
+               (__v4si) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
+               (__v4si) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi64_epi32 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
+               (__v4si)_mm_undefined_si128(),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
+               (__v4si)__O,
+               __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
+               (__v4si) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi64_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
+               (__v8hi)_mm_undefined_si128(),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
+               (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
+               (__v8hi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi64_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
+               (__v8hi)_mm_undefined_si128(),
+               (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
+               (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
+               (__v8hi) _mm_setzero_si128 (),
+               __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
+}
+
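+/* Down-conversions with unsigned saturation (VPMOVUS*). */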
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi32_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
+                (__v16qi)_mm_undefined_si128(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
+                (__v16qi) __O,
+                __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
+                (__v16qi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi32_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
+                (__v16qi)_mm_undefined_si128(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
+                (__v16qi) __O,
+                __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
+                (__v16qi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovusdb256mem_mask ((__v16qi*) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi32_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
+                (__v8hi)_mm_undefined_si128(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
+                (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
+                (__v8hi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi32_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
+                (__v8hi) _mm_undefined_si128(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
+                (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
+                (__v8hi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi64_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
+                (__v16qi)_mm_undefined_si128(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
+                (__v16qi) __O,
+                __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
+                (__v16qi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi64_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
+                (__v16qi)_mm_undefined_si128(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
+                (__v16qi) __O,
+                __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
+                (__v16qi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi64_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
+                (__v4si)_mm_undefined_si128(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
+                (__v4si) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
+                (__v4si) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi64_epi32 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
+                (__v4si)_mm_undefined_si128(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
+                (__v4si) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
+                (__v4si) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi64_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
+                (__v8hi)_mm_undefined_si128(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
+                (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
+                (__v8hi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi64_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
+                (__v8hi)_mm_undefined_si128(),
+                (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
+                (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
+                (__v8hi) _mm_setzero_si128 (),
+                __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
+}
+
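+/* Down-conversions with truncation (VPMOV*). */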
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi32_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
+              (__v16qi)_mm_undefined_si128(),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
+              (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
+              (__v16qi)
+              _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
+              (__v16qi)_mm_undefined_si128(),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
+              (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
+              (__v16qi) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi32_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
+              (__v8hi) _mm_setzero_si128 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
+              (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
+              (__v8hi) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
+              (__v8hi)_mm_setzero_si128 (),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
+              (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
+              (__v8hi) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_storeu_epi16 (void *  __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi64_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
+              (__v16qi) _mm_undefined_si128(),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
+              (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
+              (__v16qi) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
+              (__v16qi) _mm_undefined_si128(),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
+              (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
+              (__v16qi) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi64_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
+              (__v4si)_mm_undefined_si128(),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
+              (__v4si) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
+              (__v4si) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_epi32 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
+              (__v4si) _mm_undefined_si128(),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
+              (__v4si) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
+              (__v4si) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi64_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
+              (__v8hi) _mm_undefined_si128(),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
+              (__v8hi)__O,
+              __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
+              (__v8hi) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
+              (__v8hi)_mm_undefined_si128(),
+              (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
+              (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
+              (__v8hi) _mm_setzero_si128 (),
+              __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
+}
+
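+/* Extract or insert a 128-bit lane selected by an immediate
+ * (VEXTRACTF32X4/VEXTRACTI32X4/VINSERTF32X4/VINSERTI32X4). */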
+#define _mm256_extractf32x4_ps(A, imm) __extension__ ({ \
+  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+                                               (int)(imm), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)-1); })
+
+#define _mm256_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({ \
+  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+                                               (int)(imm), \
+                                               (__v4sf)(__m128)(W), \
+                                               (__mmask8)(U)); })
+
+#define _mm256_maskz_extractf32x4_ps(U, A, imm) __extension__ ({ \
+  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+                                               (int)(imm), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)(U)); })
+
+#define _mm256_extracti32x4_epi32(A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+                                                (int)(imm), \
+                                                (__v4si)_mm_setzero_si128(), \
+                                                (__mmask8)-1); })
+
+#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+                                                (int)(imm), \
+                                                (__v4si)(__m128i)(W), \
+                                                (__mmask8)(U)); })
+
+#define _mm256_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+                                                (int)(imm), \
+                                                (__v4si)_mm_setzero_si128(), \
+                                                (__mmask8)(U)); })
+
+#define _mm256_insertf32x4(A, B, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)(__m256)(A), \
+                                              (__v4sf)(__m128)(B), (int)(imm), \
+                                              (__v8sf)_mm256_setzero_ps(), \
+                                              (__mmask8)-1); })
+
+#define _mm256_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)(__m256)(A), \
+                                              (__v4sf)(__m128)(B), (int)(imm), \
+                                              (__v8sf)(__m256)(W), \
+                                              (__mmask8)(U)); })
+
+#define _mm256_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \
+  (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)(__m256)(A), \
+                                              (__v4sf)(__m128)(B), (int)(imm), \
+                                              (__v8sf)_mm256_setzero_ps(), \
+                                              (__mmask8)(U)); })
+
+#define _mm256_inserti32x4(A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)(__m256i)(A), \
+                                               (__v4si)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v8si)_mm256_setzero_si256(), \
+                                               (__mmask8)-1); })
+
+#define _mm256_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)(__m256i)(A), \
+                                               (__v4si)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v8si)(__m256i)(W), \
+                                               (__mmask8)(U)); })
+
+#define _mm256_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)(__m256i)(A), \
+                                               (__v4si)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v8si)_mm256_setzero_si256(), \
+                                               (__mmask8)(U)); })
+
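+/* VGETMANTPD/PS: extract the normalized mantissa; B selects the normalization
+ * interval and C the sign control, combined as ((C)<<2 | (B)). */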
+#define _mm_getmant_pd(A, B, C) __extension__({\
+  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)-1); })
+
+#define _mm_mask_getmant_pd(W, U, A, B, C) __extension__({\
+  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v2df)(__m128d)(W), \
+                                            (__mmask8)(U)); })
+
+#define _mm_maskz_getmant_pd(U, A, B, C) __extension__({\
+  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)(U)); })
+
+#define _mm256_getmant_pd(A, B, C) __extension__ ({ \
+  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v4df)_mm256_setzero_pd(), \
+                                            (__mmask8)-1); })
+
+#define _mm256_mask_getmant_pd(W, U, A, B, C) __extension__ ({ \
+  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v4df)(__m256d)(W), \
+                                            (__mmask8)(U)); })
+
+#define _mm256_maskz_getmant_pd(U, A, B, C) __extension__ ({ \
+  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v4df)_mm256_setzero_pd(), \
+                                            (__mmask8)(U)); })
+
+#define _mm_getmant_ps(A, B, C) __extension__ ({ \
+  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+                                           (int)(((C)<<2) | (B)), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)-1); })
+
+#define _mm_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \
+  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+                                           (int)(((C)<<2) | (B)), \
+                                           (__v4sf)(__m128)(W), \
+                                           (__mmask8)(U)); })
+
+#define _mm_maskz_getmant_ps(U, A, B, C) __extension__ ({ \
+  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+                                           (int)(((C)<<2) | (B)), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)(U)); })
+
+#define _mm256_getmant_ps(A, B, C) __extension__ ({ \
+  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+                                           (int)(((C)<<2) | (B)), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)-1); })
+
+#define _mm256_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \
+  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+                                           (int)(((C)<<2) | (B)), \
+                                           (__v8sf)(__m256)(W), \
+                                           (__mmask8)(U)); })
+
+#define _mm256_maskz_getmant_ps(U, A, B, C) __extension__ ({ \
+  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+                                           (int)(((C)<<2) | (B)), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)(U)); })
+
+#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
+                                        (double const *)(addr), \
+                                        (__v2di)(__m128i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
+                                        (long long const *)(addr), \
+                                        (__v2di)(__m128i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
+                                        (double const *)(addr), \
+                                        (__v4di)(__m256i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
+                                        (long long const *)(addr), \
+                                        (__v4di)(__m256i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
+                                       (float const *)(addr), \
+                                       (__v2di)(__m128i)(index), \
+                                       (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
+                                        (int const *)(addr), \
+                                        (__v2di)(__m128i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
+                                       (float const *)(addr), \
+                                       (__v4di)(__m256i)(index), \
+                                       (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
+                                        (int const *)(addr), \
+                                        (__v4di)(__m256i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
+                                        (double const *)(addr), \
+                                        (__v4si)(__m128i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
+                                        (long long const *)(addr), \
+                                        (__v4si)(__m128i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
+                                        (double const *)(addr), \
+                                        (__v4si)(__m128i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
+                                        (long long const *)(addr), \
+                                        (__v4si)(__m128i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
+                                       (float const *)(addr), \
+                                       (__v4si)(__m128i)(index), \
+                                       (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
+                                        (int const *)(addr), \
+                                        (__v4si)(__m128i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
+                                       (float const *)(addr), \
+                                       (__v8si)(__m256i)(index), \
+                                       (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+  (__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
+                                        (int const *)(addr), \
+                                        (__v8si)(__m256i)(index), \
+                                        (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_permutex_pd(X, C) __extension__ ({ \
+  (__m256d)__builtin_shufflevector((__v4df)(__m256d)(X), \
+                                   (__v4df)_mm256_undefined_pd(), \
+                                   ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
+                                   ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
+
+#define _mm256_mask_permutex_pd(W, U, X, C) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_permutex_pd((X), (C)), \
+                                       (__v4df)(__m256d)(W)); })
+
+#define _mm256_maskz_permutex_pd(U, X, C) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_permutex_pd((X), (C)), \
+                                       (__v4df)_mm256_setzero_pd()); })
+
+#define _mm256_permutex_epi64(X, C) __extension__ ({ \
+  (__m256i)__builtin_shufflevector((__v4di)(__m256i)(X), \
+                                   (__v4di)_mm256_undefined_si256(), \
+                                   ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
+                                   ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
+
+#define _mm256_mask_permutex_epi64(W, U, X, C) __extension__ ({ \
+  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+                                      (__v4di)_mm256_permutex_epi64((X), (C)), \
+                                      (__v4di)(__m256i)(W)); })
+
+#define _mm256_maskz_permutex_epi64(U, X, C) __extension__ ({ \
+  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+                                      (__v4di)_mm256_permutex_epi64((X), (C)), \
+                                      (__v4di)_mm256_setzero_si256()); })
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_permutexvar_pd (__m256i __X, __m256d __Y)
+{
+  return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
+                 (__v4di) __X,
+                 (__v4df) _mm256_undefined_si256 (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_pd (__m256d __W, __mmask8 __U, __m256i __X,
+          __m256d __Y)
+{
+  return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
+                 (__v4di) __X,
+                 (__v4df) __W,
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_pd (__mmask8 __U, __m256i __X, __m256d __Y)
+{
+  return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
+                 (__v4di) __X,
+                 (__v4df) _mm256_setzero_pd (),
+                 (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
+                 (__v4di) __X,
+                 (__v4di) _mm256_setzero_si256 (),
+                 (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutexvar_epi64 ( __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
+                 (__v4di) __X,
+                 (__v4di) _mm256_undefined_si256 (),
+                 (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_epi64 (__m256i __W, __mmask8 __M, __m256i __X,
+             __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
+                 (__v4di) __X,
+                 (__v4di) __W,
+                 __M);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_ps (__m256 __W, __mmask8 __U, __m256i __X,
+          __m256 __Y)
+{
+  return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
+                (__v8si) __X,
+                (__v8sf) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_ps (__mmask8 __U, __m256i __X, __m256 __Y)
+{
+  return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
+                (__v8si) __X,
+                (__v8sf) _mm256_setzero_ps (),
+                (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_permutexvar_ps (__m256i __X, __m256 __Y)
+{
+  return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
+                (__v8si) __X,
+                (__v8sf) _mm256_undefined_si256 (),
+                (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_epi32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
+                 (__v8si) __X,
+                 (__v8si) _mm256_setzero_si256 (),
+                 __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
+             __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
+                 (__v8si) __X,
+                 (__v8si) __W,
+                 (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutexvar_epi32 (__m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
+                 (__v8si) __X,
+                 (__v8si) _mm256_undefined_si256(),
+                 (__mmask8) -1);
+}
+
+#define _mm_alignr_epi32(A, B, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_alignd128_mask((__v4si)(__m128i)(A), \
+                                         (__v4si)(__m128i)(B), (int)(imm), \
+                                         (__v4si)_mm_undefined_si128(), \
+                                         (__mmask8)-1); })
+
+#define _mm_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_alignd128_mask((__v4si)(__m128i)(A), \
+                                         (__v4si)(__m128i)(B), (int)(imm), \
+                                         (__v4si)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_alignr_epi32(U, A, B, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_alignd128_mask((__v4si)(__m128i)(A), \
+                                         (__v4si)(__m128i)(B), (int)(imm), \
+                                         (__v4si)_mm_setzero_si128(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_alignr_epi32(A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_alignd256_mask((__v8si)(__m256i)(A), \
+                                         (__v8si)(__m256i)(B), (int)(imm), \
+                                         (__v8si)_mm256_undefined_si256(), \
+                                         (__mmask8)-1); })
+
+#define _mm256_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_alignd256_mask((__v8si)(__m256i)(A), \
+                                         (__v8si)(__m256i)(B), (int)(imm), \
+                                         (__v8si)(__m256i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_maskz_alignr_epi32(U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_alignd256_mask((__v8si)(__m256i)(A), \
+                                         (__v8si)(__m256i)(B), (int)(imm), \
+                                         (__v8si)_mm256_setzero_si256(), \
+                                         (__mmask8)(U)); })
+
+#define _mm_alignr_epi64(A, B, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_alignq128_mask((__v2di)(__m128i)(A), \
+                                         (__v2di)(__m128i)(B), (int)(imm), \
+                                         (__v2di)_mm_setzero_di(), \
+                                         (__mmask8)-1); })
+
+#define _mm_mask_alignr_epi64(W, U, A, B, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_alignq128_mask((__v2di)(__m128i)(A), \
+                                         (__v2di)(__m128i)(B), (int)(imm), \
+                                         (__v2di)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_alignr_epi64(U, A, B, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_alignq128_mask((__v2di)(__m128i)(A), \
+                                         (__v2di)(__m128i)(B), (int)(imm), \
+                                         (__v2di)_mm_setzero_di(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_alignr_epi64(A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_alignq256_mask((__v4di)(__m256i)(A), \
+                                         (__v4di)(__m256i)(B), (int)(imm), \
+                                         (__v4di)_mm256_undefined_pd(), \
+                                         (__mmask8)-1); })
+
+#define _mm256_mask_alignr_epi64(W, U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_alignq256_mask((__v4di)(__m256i)(A), \
+                                         (__v4di)(__m256i)(B), (int)(imm), \
+                                         (__v4di)(__m256i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_maskz_alignr_epi64(U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_alignq256_mask((__v4di)(__m256i)(A), \
+                                         (__v4di)(__m256i)(B), (int)(imm), \
+                                         (__v4di)_mm256_setzero_si256(), \
+                                         (__mmask8)(U)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+                                             (__v4sf)_mm_movehdup_ps(__A),
+                                             (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+                                             (__v4sf)_mm_movehdup_ps(__A),
+                                             (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                             (__v8sf)_mm256_movehdup_ps(__A),
+                                             (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                             (__v8sf)_mm256_movehdup_ps(__A),
+                                             (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+                                             (__v4sf)_mm_moveldup_ps(__A),
+                                             (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+                                             (__v4sf)_mm_moveldup_ps(__A),
+                                             (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                             (__v8sf)_mm256_moveldup_ps(__A),
+                                             (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                             (__v8sf)_mm256_moveldup_ps(__A),
+                                             (__v8sf)_mm256_setzero_ps());
+}
+
+#define _mm256_mask_shuffle_epi32(W, U, A, I) __extension__({\
+  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+                                      (__v8si)_mm256_shuffle_epi32((A), (I)), \
+                                      (__v8si)(__m256i)(W)); })
+
+#define _mm256_maskz_shuffle_epi32(U, A, I) __extension__({\
+  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+                                      (__v8si)_mm256_shuffle_epi32((A), (I)), \
+                                      (__v8si)_mm256_setzero_si256()); })
+
+#define _mm_mask_shuffle_epi32(W, U, A, I) __extension__({\
+  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+                                      (__v4si)_mm_shuffle_epi32((A), (I)), \
+                                      (__v4si)(__m128i)(W)); })
+
+#define _mm_maskz_shuffle_epi32(U, A, I) __extension__({\
+  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+                                      (__v4si)_mm_shuffle_epi32((A), (I)), \
+                                      (__v4si)_mm_setzero_si128()); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
+              (__v2df) __A,
+              (__v2df) __W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_mov_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
+              (__v2df) __A,
+              (__v2df) _mm_setzero_pd ());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
+              (__v4df) __A,
+              (__v4df) __W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
+              (__v4df) __A,
+              (__v4df) _mm256_setzero_pd ());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
+             (__v4sf) __A,
+             (__v4sf) __W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_mov_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
+             (__v4sf) __A,
+             (__v4sf) _mm_setzero_ps ());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
+             (__v8sf) __A,
+             (__v8sf) __W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
+             (__v8sf) __A,
+             (__v8sf) _mm256_setzero_ps ());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
+             (__v4sf) __W,
+             (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
+             (__v4sf)
+             _mm_setzero_ps (),
+             (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
+                (__v8sf) __W,
+                (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
+{
+  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
+                (__v8sf)
+                _mm256_setzero_ps (),
+                (__mmask8) __U);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, _MM_FROUND_CUR_DIRECTION,
+                                                  (__v8hi) __W,
+                                                  (__mmask8) __U);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_ph (__mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, _MM_FROUND_CUR_DIRECTION,
+                                                  (__v8hi) _mm_setzero_si128 (),
+                                                  (__mmask8) __U);
+}
+
+#define _mm_mask_cvt_roundps_ph(W, U, A, I) __extension__ ({ \
+  (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
+                                         (__v8hi)(__m128i)(W), \
+                                         (__mmask8)(U)); })
+
+#define _mm_maskz_cvt_roundps_ph(U, A, I) __extension__ ({ \
+  (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
+                                         (__v8hi)_mm_setzero_si128(), \
+                                         (__mmask8)(U)); })
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m256 __A)
+{
+  return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, _MM_FROUND_CUR_DIRECTION,
+                                                      (__v8hi) __W,
+                                                      (__mmask8) __U);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_ph ( __mmask8 __U, __m256 __A)
+{
+  return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, _MM_FROUND_CUR_DIRECTION,
+                                                      (__v8hi) _mm_setzero_si128(),
+                                                      (__mmask8) __U);
+}
+#define _mm256_mask_cvt_roundps_ph(W, U, A, I) __extension__ ({ \
+  (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
+                                            (__v8hi)(__m128i)(W), \
+                                            (__mmask8)(U)); })
+
+#define _mm256_maskz_cvt_roundps_ph(U, A, I) __extension__ ({ \
+  (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
+                                            (__v8hi)_mm_setzero_si128(), \
+                                            (__mmask8)(U)); })
+
+
 #undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS_BOTH
 
 #endif /* __AVX512VLINTRIN_H */
diff --git a/renderscript/clang-include/avxintrin.h b/renderscript/clang-include/avxintrin.h
index 6d1ca54..86bfdfb 100644
--- a/renderscript/clang-include/avxintrin.h
+++ b/renderscript/clang-include/avxintrin.h
@@ -35,6 +35,12 @@
 typedef short __v16hi __attribute__ ((__vector_size__ (32)));
 typedef char __v32qi __attribute__ ((__vector_size__ (32)));
 
+/* Unsigned types */
+typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32)));
+typedef unsigned int __v8su __attribute__ ((__vector_size__ (32)));
+typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32)));
+typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32)));
+
 /* We need an explicitly signed variant for char. Note that this shouldn't
  * appear in the interface though. */
 typedef signed char __v32qs __attribute__((__vector_size__(32)));
@@ -47,193 +53,703 @@
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx")))
 
 /* Arithmetic */
+/// \brief Adds two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDPD / ADDPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x double] containing the sums of both
+///    operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_add_pd(__m256d __a, __m256d __b)
 {
-  return __a+__b;
+  return (__m256d)((__v4df)__a+(__v4df)__b);
 }
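
For reference, a minimal usage sketch of the arithmetic intrinsics documented in this hunk; the helper `add_arrays` and its loop bounds are illustrative assumptions, and only intrinsics available through <immintrin.h> are used.

#include <immintrin.h>

/* Illustrative only: add two double arrays four elements at a time.
 * Assumes n is a multiple of 4 and that all pointers are valid. */
static void add_arrays(const double *a, const double *b, double *out, int n)
{
  for (int i = 0; i < n; i += 4) {
    __m256d va = _mm256_loadu_pd(a + i);                /* unaligned 256-bit load */
    __m256d vb = _mm256_loadu_pd(b + i);
    _mm256_storeu_pd(out + i, _mm256_add_pd(va, vb));   /* VADDPD */
  }
}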
 
+/// \brief Adds two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDPS / ADDPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x float] containing the sums of both
+///    operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_add_ps(__m256 __a, __m256 __b)
 {
-  return __a+__b;
+  return (__m256)((__v8sf)__a+(__v8sf)__b);
 }
 
+/// \brief Subtracts two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSUBPD / SUBPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing the minuend.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing the subtrahend.
+/// \returns A 256-bit vector of [4 x double] containing the differences between
+///    both operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_sub_pd(__m256d __a, __m256d __b)
 {
-  return __a-__b;
+  return (__m256d)((__v4df)__a-(__v4df)__b);
 }
 
+/// \brief Subtracts two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSUBPS / SUBPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing the minuend.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing the subtrahend.
+/// \returns A 256-bit vector of [8 x float] containing the differences between
+///    both operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_sub_ps(__m256 __a, __m256 __b)
 {
-  return __a-__b;
+  return (__m256)((__v8sf)__a-(__v8sf)__b);
 }
 
+/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+///    two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDSUBPD / ADDSUBPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing the left source operand.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing the right source operand.
+/// \returns A 256-bit vector of [4 x double] containing the alternating sums
+///    and differences between both operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_addsub_pd(__m256d __a, __m256d __b)
 {
   return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
 }
 
+/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+///    two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDSUBPS / ADDSUBPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing the left source operand.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing the right source operand.
+/// \returns A 256-bit vector of [8 x float] containing the alternating sums and
+///    differences between both operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_addsub_ps(__m256 __a, __m256 __b)
 {
   return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
 }
 
+/// \brief Divides two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VDIVPD / DIVPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing the dividend.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing the divisor.
+/// \returns A 256-bit vector of [4 x double] containing the quotients of both
+///    operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_div_pd(__m256d __a, __m256d __b)
 {
-  return __a / __b;
+  return (__m256d)((__v4df)__a/(__v4df)__b);
 }
 
+/// \brief Divides two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VDIVPS / DIVPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing the dividend.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing the divisor.
+/// \returns A 256-bit vector of [8 x float] containing the quotients of both
+///    operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_div_ps(__m256 __a, __m256 __b)
 {
-  return __a / __b;
+  return (__m256)((__v8sf)__a/(__v8sf)__b);
 }
 
+/// \brief Compares two 256-bit vectors of [4 x double] and returns the greater
+///    of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMAXPD / MAXPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing one of the operands.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing one of the operands.
+/// \returns A 256-bit vector of [4 x double] containing the maximum values
+///    between both operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_max_pd(__m256d __a, __m256d __b)
 {
   return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
 }
 
+/// \brief Compares two 256-bit vectors of [8 x float] and returns the greater
+///    of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMAXPS / MAXPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing one of the operands.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing one of the operands.
+/// \returns A 256-bit vector of [8 x float] containing the maximum values
+///    between both operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_max_ps(__m256 __a, __m256 __b)
 {
   return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
 }
 
+/// \brief Compares two 256-bit vectors of [4 x double] and returns the lesser
+///    of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMINPD / MINPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing one of the operands.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing one of the operands.
+/// \returns A 256-bit vector of [4 x double] containing the minimum values
+///    between both operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_min_pd(__m256d __a, __m256d __b)
 {
   return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
 }
 
+/// \brief Compares two 256-bit vectors of [8 x float] and returns the lesser
+///    of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMINPS / MINPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing one of the operands.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing one of the operands.
+/// \returns A 256-bit vector of [8 x float] containing the minimum values
+///    between both operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_min_ps(__m256 __a, __m256 __b)
 {
   return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
 }
 
+/// \brief Multiplies two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMULPD / MULPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing one of the operands.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing one of the operands.
+/// \returns A 256-bit vector of [4 x double] containing the products of both
+///    operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_mul_pd(__m256d __a, __m256d __b)
 {
-  return __a * __b;
+  return (__m256d)((__v4df)__a * (__v4df)__b);
 }
 
+/// \brief Multiplies two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMULPS / MULPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing one of the operands.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing one of the operands.
+/// \returns A 256-bit vector of [8 x float] containing the products of both
+///    operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_mul_ps(__m256 __a, __m256 __b)
 {
-  return __a * __b;
+  return (__m256)((__v8sf)__a * (__v8sf)__b);
 }
 
+/// \brief Calculates the square roots of the values in a 256-bit vector of
+///    [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSQRTPD / SQRTPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double].
+/// \returns A 256-bit vector of [4 x double] containing the square roots of the
+///    values in the operand.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_sqrt_pd(__m256d __a)
 {
   return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
 }
 
+/// \brief Calculates the square roots of the values in a 256-bit vector of
+///    [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSQRTPS / SQRTPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the square roots of the
+///    values in the operand.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_sqrt_ps(__m256 __a)
 {
   return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
 }
 
+/// \brief Calculates the reciprocal square roots of the values in a 256-bit
+///    vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRSQRTPS / RSQRTPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the reciprocal square
+///    roots of the values in the operand.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_rsqrt_ps(__m256 __a)
 {
   return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
 }
 
+/// \brief Calculates the reciprocals of the values in a 256-bit vector of
+///    [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRCPPS / RCPPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the reciprocals of the
+///    values in the operand.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_rcp_ps(__m256 __a)
 {
   return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
 }
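
The VRCPPS estimate is only accurate to roughly 12 bits (Intel documents a relative error of at most 1.5 * 2^-12), so callers needing more precision usually refine it. A sketch of one Newton-Raphson step, using the hypothetical helper `rcp_refined`:

#include <immintrin.h>

/* Refine the approximate reciprocal with one Newton-Raphson step:
 * x1 = x0 * (2 - a * x0), which roughly doubles the number of correct bits. */
static __m256 rcp_refined(__m256 a)
{
  __m256 x0 = _mm256_rcp_ps(a);
  return _mm256_mul_ps(x0, _mm256_sub_ps(_mm256_set1_ps(2.0f),
                                         _mm256_mul_ps(a, x0)));
}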
 
+/// \brief Rounds the values in a 256-bit vector of [4 x double] as specified
+///    by the byte operand. The source values are rounded to integer values and
+///    returned as 64-bit double-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_round_pd(__m256d V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPD / ROUNDPD instruction.
+///
+/// \param V
+///    A 256-bit vector of [4 x double].
+/// \param M
+///    An integer value that specifies the rounding operation.
+///    Bits [7:4] are reserved.
+///    Bit [3] is a precision exception value:
+///    0: A normal PE exception is used.
+///    1: The PE field is not updated.
+///    Bit [2] is the rounding control source:
+///    0: Use bits [1:0] of M.
+///    1: Use the current MXCSR setting.
+///    Bits [1:0] contain the rounding control definition:
+///    00: Nearest.
+///    01: Downward (toward negative infinity).
+///    10: Upward (toward positive infinity).
+///    11: Truncated.
+/// \returns A 256-bit vector of [4 x double] containing the rounded values.
 #define _mm256_round_pd(V, M) __extension__ ({ \
     (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })
 
+/// \brief Rounds the values stored in a 256-bit vector of [8 x float] as
+///    specified by the byte operand. The source values are rounded to integer
+///    values and returned as floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_round_ps(__m256 V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPS / ROUNDPS instruction.
+///
+/// \param V
+///    A 256-bit vector of [8 x float].
+/// \param M
+///    An integer value that specifies the rounding operation.
+///    Bits [7:4] are reserved.
+///    Bit [3] is a precision exception value:
+///    0: A normal PE exception is used.
+///    1: The PE field is not updated.
+///    Bit [2] is the rounding control source:
+///    0: Use bits [1:0] of M.
+///    1: Use the current MXCSR setting.
+///    Bits [1:0] contain the rounding control definition:
+///    00: Nearest.
+///    01: Downward (toward negative infinity).
+///    10: Upward (toward positive infinity).
+///    11: Truncated.
+/// \returns A 256-bit vector of [8 x float] containing the rounded values.
 #define _mm256_round_ps(V, M) __extension__ ({ \
   (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })
 
+/// \brief Rounds up the values stored in a 256-bit vector of [4 x double]. The
+///    source values are rounded up to integer values and returned as 64-bit
+///    double-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_ceil_pd(__m256d V);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPD / ROUNDPD instruction.
+///
+/// \param V
+///    A 256-bit vector of [4 x double].
+/// \returns A 256-bit vector of [4 x double] containing the rounded up values.
 #define _mm256_ceil_pd(V)  _mm256_round_pd((V), _MM_FROUND_CEIL)
+
+/// \brief Rounds down the values stored in a 256-bit vector of [4 x double].
+///    The source values are rounded down to integer values and returned as
+///    64-bit double-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_floor_pd(__m256d V);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPD / ROUNDPD instruction.
+///
+/// \param V
+///    A 256-bit vector of [4 x double].
+/// \returns A 256-bit vector of [4 x double] containing the rounded down
+///    values.
 #define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
+
+/// \brief Rounds up the values stored in a 256-bit vector of [8 x float]. The
+///    source values are rounded up to integer values and returned as
+///    floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_ceil_ps(__m256 V);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPS / ROUNDPS instruction.
+///
+/// \param V
+///    A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the rounded up values.
 #define _mm256_ceil_ps(V)  _mm256_round_ps((V), _MM_FROUND_CEIL)
+
+/// \brief Rounds down the values stored in a 256-bit vector of [8 x float]. The
+///    source values are rounded down to integer values and returned as
+///    floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_floor_ps(__m256 V);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPS / ROUNDPS instruction.
+///
+/// \param V
+///    A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the rounded down values.
 #define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
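
A small sketch showing how the rounding-control bits described above combine in practice; `rounding_demo` and the sample values are illustrative only.

#include <immintrin.h>

static void rounding_demo(void)
{
  __m256d v = _mm256_setr_pd(1.25, -1.25, 2.5, -2.5);

  /* Bits [1:0] = 00 (round to nearest), bit [3] set so the PE field is not
   * updated. */
  __m256d nearest = _mm256_round_pd(v, _MM_FROUND_TO_NEAREST_INT |
                                       _MM_FROUND_NO_EXC);

  /* The ceil/floor macros are fixed-mode wrappers around _mm256_round_pd. */
  __m256d up   = _mm256_ceil_pd(v);   /* {2.0, -1.0, 3.0, -2.0} */
  __m256d down = _mm256_floor_pd(v);  /* {1.0, -2.0, 2.0, -3.0} */
  (void)nearest; (void)up; (void)down;
}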
 
 /* Logical */
+/// \brief Performs a bitwise AND of two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDPD / ANDPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the
+///    values between both operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_and_pd(__m256d __a, __m256d __b)
 {
-  return (__m256d)((__v4di)__a & (__v4di)__b);
+  return (__m256d)((__v4du)__a & (__v4du)__b);
 }
 
+/// \brief Performs a bitwise AND of two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDPS / ANDPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the
+///    values between both operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_and_ps(__m256 __a, __m256 __b)
 {
-  return (__m256)((__v8si)__a & (__v8si)__b);
+  return (__m256)((__v8su)__a & (__v8su)__b);
 }
 
+/// \brief Performs a bitwise AND of two 256-bit vectors of [4 x double], using
+///    the one's complement of the values contained in the first source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDNPD / ANDNPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing the left source operand. The
+///    one's complement of this value is used in the bitwise AND.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing the right source operand.
+/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the
+///    values of the second operand and the one's complement of the first
+///    operand.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_andnot_pd(__m256d __a, __m256d __b)
 {
-  return (__m256d)(~(__v4di)__a & (__v4di)__b);
+  return (__m256d)(~(__v4du)__a & (__v4du)__b);
 }
 
+/// \brief Performs a bitwise AND of two 256-bit vectors of [8 x float], using
+///    the one's complement of the values contained in the first source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDNPS / ANDNPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing the left source operand. The
+///    one's complement of this value is used in the bitwise AND.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing the right source operand.
+/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the
+///    values of the second operand and the one's complement of the first
+///    operand.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_andnot_ps(__m256 __a, __m256 __b)
 {
-  return (__m256)(~(__v8si)__a & (__v8si)__b);
+  return (__m256)(~(__v8su)__a & (__v8su)__b);
 }
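
One common use of the AND-NOT form is clearing sign bits; a sketch using the hypothetical helper `abs_ps`:

#include <immintrin.h>

/* ~(-0.0f) has every bit set except the sign bit, so the AND-NOT keeps the
 * magnitude of each element and drops its sign. */
static __m256 abs_ps(__m256 x)
{
  return _mm256_andnot_ps(_mm256_set1_ps(-0.0f), x);
}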
 
+/// \brief Performs a bitwise OR of two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VORPD / ORPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x double] containing the bitwise OR of the
+///    values between both operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_or_pd(__m256d __a, __m256d __b)
 {
-  return (__m256d)((__v4di)__a | (__v4di)__b);
+  return (__m256d)((__v4du)__a | (__v4du)__b);
 }
 
+/// \brief Performs a bitwise OR of two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VORPS / ORPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x float] containing the bitwise OR of the
+///    values between both operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_or_ps(__m256 __a, __m256 __b)
 {
-  return (__m256)((__v8si)__a | (__v8si)__b);
+  return (__m256)((__v8su)__a | (__v8su)__b);
 }
 
+/// \brief Performs a bitwise XOR of two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VXORPD / XORPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+/// \param __b
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x double] containing the bitwise XOR of the
+///    values between both operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_xor_pd(__m256d __a, __m256d __b)
 {
-  return (__m256d)((__v4di)__a ^ (__v4di)__b);
+  return (__m256d)((__v4du)__a ^ (__v4du)__b);
 }
 
+/// \brief Performs a bitwise XOR of two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VXORPS / XORPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+/// \param __b
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x float] containing the bitwise XOR of the
+///    values between both operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_xor_ps(__m256 __a, __m256 __b)
 {
-  return (__m256)((__v8si)__a ^ (__v8si)__b);
+  return (__m256)((__v8su)__a ^ (__v8su)__b);
 }
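
Likewise, XOR against a sign-bit mask gives a branch-free negation; a sketch with the hypothetical helper `negate_pd`:

#include <immintrin.h>

/* XOR with -0.0 toggles only the sign bit of each double, negating every
 * element without an arithmetic subtract. */
static __m256d negate_pd(__m256d x)
{
  return _mm256_xor_pd(x, _mm256_set1_pd(-0.0));
}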
 
 /* Horizontal arithmetic */
+/// \brief Horizontally adds the adjacent pairs of values contained in two
+///    256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHADDPD / HADDPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+///    The horizontal sums of the values are returned in the even-indexed
+///    elements of a vector of [4 x double].
+/// \param __b
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+///    The horizontal sums of the values are returned in the odd-indexed
+///    elements of a vector of [4 x double].
+/// \returns A 256-bit vector of [4 x double] containing the horizontal sums of
+///    both operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_hadd_pd(__m256d __a, __m256d __b)
 {
   return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
 }
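
A worked example of the element placement described above; `hadd_demo` and the sample values are illustrative.

#include <immintrin.h>

static void hadd_demo(void)
{
  __m256d a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);      /* a0..a3 */
  __m256d b = _mm256_setr_pd(10.0, 20.0, 30.0, 40.0);  /* b0..b3 */

  /* Even-indexed elements hold the sums from a, odd-indexed the sums from b:
   * {a0+a1, b0+b1, a2+a3, b2+b3} = {3.0, 30.0, 7.0, 70.0}. */
  __m256d h = _mm256_hadd_pd(a, b);
  (void)h;
}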
 
+/// \brief Horizontally adds the adjacent pairs of values contained in two
+///    256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHADDPS / HADDPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+///    The horizontal sums of the values are returned in the elements with
+///    index 0, 1, 4, 5 of a vector of [8 x float].
+/// \param __b
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+///    The horizontal sums of the values are returned in the elements with
+///    index 2, 3, 6, 7 of a vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the horizontal sums of
+///    both operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_hadd_ps(__m256 __a, __m256 __b)
 {
   return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
 }
 
+/// \brief Horizontally subtracts the adjacent pairs of values contained in two
+///    256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHSUBPD / HSUBPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+///    The horizontal differences between the values are returned in the
+///    even-indexed elements of a vector of [4 x double].
+/// \param __b
+///    A 256-bit vector of [4 x double] containing one of the source operands.
+///    The horizontal differences between the values are returned in the
+///    odd-indexed elements of a vector of [4 x double].
+/// \returns A 256-bit vector of [4 x double] containing the horizontal
+///    differences of both operands.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_hsub_pd(__m256d __a, __m256d __b)
 {
   return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
 }
 
+/// \brief Horizontally subtracts the adjacent pairs of values contained in two
+///    256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHSUBPS / HSUBPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+///    The horizontal differences between the values are returned in the
+///    elements with index 0, 1, 4, 5 of a vector of [8 x float].
+/// \param __b
+///    A 256-bit vector of [8 x float] containing one of the source operands.
+///    The horizontal differences between the values are returned in the
+///    elements with index 2, 3, 6, 7 of a vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the horizontal
+///    differences of both operands.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_hsub_ps(__m256 __a, __m256 __b)
 {
@@ -241,71 +757,600 @@
 }
 
 /* Vector permutations */
+/// \brief Copies the values in a 128-bit vector of [2 x double] as specified
+///    by the 128-bit integer vector operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double].
+/// \param __c
+///    A 128-bit integer vector operand specifying how the values are to be
+///    copied.
+///    Bit [1]:
+///    0: Bits [63:0] of the source are copied to bits [63:0] of the
+///    returned vector.
+///    1: Bits [127:64] of the source are copied to bits [63:0] of the
+///    returned vector.
+///    Bit [65]:
+///    0: Bits [63:0] of the source are copied to bits [127:64] of the
+///    returned vector.
+///    1: Bits [127:64] of the source are copied to bits [127:64] of the
+///    returned vector.
+/// \returns A 128-bit vector of [2 x double] containing the copied values.
 static __inline __m128d __DEFAULT_FN_ATTRS
 _mm_permutevar_pd(__m128d __a, __m128i __c)
 {
   return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
 }
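
A sketch of the bit [1] / bit [65] selection described above, using illustrative values in a hypothetical `permutevar_pd_demo`:

#include <immintrin.h>

static void permutevar_pd_demo(void)
{
  __m128d x = _mm_setr_pd(1.0, 2.0);   /* element 0 = 1.0, element 1 = 2.0 */

  /* Only bit 1 of each 64-bit control element is examined.  The value 2 sets
   * that bit in both elements, so both result lanes take the upper source
   * element: {2.0, 2.0}. */
  __m128i sel = _mm_set_epi64x(2, 2);
  __m128d r = _mm_permutevar_pd(x, sel);
  (void)r;
}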
 
+/// \brief Copies the values in a 256-bit vector of [4 x double] as
+///    specified by the 256-bit integer vector operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double].
+/// \param __c
+///    A 256-bit integer vector operand specifying how the values are to be
+///    copied.
+///    Bit [1]:
+///    0: Bits [63:0] of the source are copied to bits [63:0] of the
+///    returned vector.
+///    1: Bits [127:64] of the source are copied to bits [63:0] of the
+///    returned vector.
+///    Bit [65]:
+///    0: Bits [63:0] of the source are copied to bits [127:64] of the
+///    returned vector.
+///    1: Bits [127:64] of the source are copied to bits [127:64] of the
+///    returned vector.
+///    Bit [129]:
+///    0: Bits [191:128] of the source are copied to bits [191:128] of the
+///    returned vector.
+///    1: Bits [255:192] of the source are copied to bits [191:128] of the
+///    returned vector.
+///    Bit [193]:
+///    0: Bits [191:128] of the source are copied to bits [255:192] of the
+///    returned vector.
+///    1: Bits [255:192] of the source are copied to bits [255:192] of the
+///    returned vector.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_permutevar_pd(__m256d __a, __m256i __c)
 {
   return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
 }
 
+/// \brief Copies the values stored in a 128-bit vector of [4 x float] as
+///    specified by the 128-bit integer vector operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __c
+///    A 128-bit integer vector operand specifying how the values are to be
+///    copied.
+///    Bits [1:0]:
+///    00: Bits [31:0] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    Bits [33:32]:
+///    00: Bits [31:0] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    Bits [65:64]:
+///    00: Bits [31:0] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    Bits [97:96]:
+///    00: Bits [31:0] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [127:96] of the
+///    returned vector.
+/// \returns A 128-bit vector of [4 x float] containing the copied values.
 static __inline __m128 __DEFAULT_FN_ATTRS
 _mm_permutevar_ps(__m128 __a, __m128i __c)
 {
   return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
 }
 
+/// \brief Copies the values stored in a 256-bit vector of [8 x float] as
+///    specified by the 256-bit integer vector operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float].
+/// \param __c
+///    A 256-bit integer vector operand specifying how the values are to be
+///    copied.
+///    Bits [1:0]:
+///    00: Bits [31:0] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    Bits [33:32]:
+///    00: Bits [31:0] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    Bits [65:64]:
+///    00: Bits [31:0] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    Bits [97:96]:
+///    00: Bits [31:0] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    Bits [129:128]:
+///    00: Bits [159:128] of the source are copied to bits [159:128] of the
+///    returned vector.
+///    01: Bits [191:160] of the source are copied to bits [159:128] of the
+///    returned vector.
+///    10: Bits [223:192] of the source are copied to bits [159:128] of the
+///    returned vector.
+///    11: Bits [255:224] of the source are copied to bits [159:128] of the
+///    returned vector.
+///    Bits [161:160]:
+///    00: Bits [159:128] of the source are copied to bits [191:160] of the
+///    returned vector.
+///    01: Bits [191:160] of the source are copied to bits [191:160] of the
+///    returned vector.
+///    10: Bits [223:192] of the source are copied to bits [191:160] of the
+///    returned vector.
+///    11: Bits [255:224] of the source are copied to bits [191:160] of the
+///    returned vector.
+///    Bits [193:192]:
+///    00: Bits [159:128] of the source are copied to bits [223:192] of the
+///    returned vector.
+///    01: Bits [191:160] of the source are copied to bits [223:192] of the
+///    returned vector.
+///    10: Bits [223:192] of the source are copied to bits [223:192] of the
+///    returned vector.
+///    11: Bits [255:224] of the source are copied to bits [223:192] of the
+///    returned vector.
+///    Bits [225:224]:
+///    00: Bits [159:128] of the source are copied to bits [255:224] of the
+///    returned vector.
+///    01: Bits [191:160] of the source are copied to bits [255:224] of the
+///    returned vector.
+///    10: Bits [223:192] of the source are copied to bits [255:224] of the
+///    returned vector.
+///    11: Bits [255:224] of the source are copied to bits [255:224] of the
+///    returned vector.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_permutevar_ps(__m256 __a, __m256i __c)
 {
   return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
 }
 
+/// \brief Copies the values in a 128-bit vector of [2 x double] as
+///    specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_permute_pd(__m128d A, const int C);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+///
+/// \param A
+///    A 128-bit vector of [2 x double].
+/// \param C
+///    An immediate integer operand specifying how the values are to be copied.
+///    Bit [0]:
+///    0: Bits [63:0] of the source are copied to bits [63:0] of the
+///    returned vector.
+///    1: Bits [127:64] of the source are copied to bits [63:0] of the
+///    returned vector.
+///    Bit [1]:
+///    0: Bits [63:0] of the source are copied to bits [127:64] of the
+///    returned vector.
+///    1: Bits [127:64] of the source are copied to bits [127:64] of the
+///    returned vector.
+/// \returns A 128-bit vector of [2 x double] containing the copied values.
 #define _mm_permute_pd(A, C) __extension__ ({ \
   (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
-                                   (__v2df)_mm_setzero_pd(), \
-                                   (C) & 0x1, ((C) & 0x2) >> 1); })
+                                   (__v2df)_mm_undefined_pd(), \
+                                   ((C) >> 0) & 0x1, ((C) >> 1) & 0x1); })
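+
+/* Usage sketch (illustrative only; the values below are hypothetical): with
+ * C = 0x1, bit 0 selects the high double for element 0 and bit 1 selects the
+ * low double for element 1, i.e. the two doubles are swapped.
+ *
+ *   __m128d v = _mm_set_pd(2.0, 1.0);   // elements 0,1 = 1,2
+ *   __m128d r = _mm_permute_pd(v, 0x1); // r = {2.0, 1.0}
+ */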
 
+/// \brief Copies the values in a 256-bit vector of [4 x double] as
+///    specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_permute_pd(__m256d A, const int C);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+///
+/// \param A
+///    A 256-bit vector of [4 x double].
+/// \param C
+///    An immediate integer operand specifying how the values are to be copied.
+///    Bit [0]:
+///    0: Bits [63:0] of the source are copied to bits [63:0] of the
+///    returned vector.
+///    1: Bits [127:64] of the source are copied to bits [63:0] of the
+///    returned vector.
+///    Bit [1]:
+///    0: Bits [63:0] of the source are copied to bits [127:64] of the
+///    returned vector.
+///    1: Bits [127:64] of the source are copied to bits [127:64] of the
+///    returned vector.
+///    Bit [2]:
+///    0: Bits [191:128] of the source are copied to bits [191:128] of the
+///    returned vector.
+///    1: Bits [255:192] of the source are copied to bits [191:128] of the
+///    returned vector.
+///    Bit [3]:
+///    0: Bits [191:128] of the source are copied to bits [255:192] of the
+///    returned vector.
+///    1: Bits [255:192] of the source are copied to bits [255:192] of the
+///    returned vector.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
 #define _mm256_permute_pd(A, C) __extension__ ({ \
   (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
-                                   (__v4df)_mm256_setzero_pd(), \
-                                   (C) & 0x1, ((C) & 0x2) >> 1, \
-                                   2 + (((C) & 0x4) >> 2), \
-                                   2 + (((C) & 0x8) >> 3)); })
+                                   (__v4df)_mm256_undefined_pd(), \
+                                   0 + (((C) >> 0) & 0x1), \
+                                   0 + (((C) >> 1) & 0x1), \
+                                   2 + (((C) >> 2) & 0x1), \
+                                   2 + (((C) >> 3) & 0x1)); })
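+
+/* Usage sketch (illustrative only; the values below are hypothetical): the
+ * immediate 0x5 (binary 0101) swaps the two doubles inside each 128-bit lane.
+ *
+ *   __m256d v = _mm256_set_pd(4.0, 3.0, 2.0, 1.0); // elements 0..3 = 1,2,3,4
+ *   __m256d r = _mm256_permute_pd(v, 0x5);         // r = {2.0, 1.0, 4.0, 3.0}
+ */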
 
+/// \brief Copies the values in a 128-bit vector of [4 x float] as
+///    specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm_permute_ps(__m128 A, const int C);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param A
+///    A 128-bit vector of [4 x float].
+/// \param C
+///    An immediate integer operand specifying how the values are to be copied.
+///    Bits [1:0]:
+///    00: Bits [31:0] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    Bits [3:2]:
+///    00: Bits [31:0] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    Bits [5:4]:
+///    00: Bits [31:0] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    Bits [7:6]:
+///    00: Bits [31:0] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [127:96] of the
+///    returned vector.
+/// \returns A 128-bit vector of [4 x float] containing the copied values.
 #define _mm_permute_ps(A, C) __extension__ ({ \
   (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
-                                  (__v4sf)_mm_setzero_ps(), \
-                                   (C) & 0x3, ((C) & 0xc) >> 2, \
-                                   ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })
+                                  (__v4sf)_mm_undefined_ps(), \
+                                  ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
+                                  ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
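+
+/* Usage sketch (illustrative only; the values below are hypothetical): the
+ * immediate 0x1B encodes the selectors 3,2,1,0 (equivalently
+ * _MM_SHUFFLE(0, 1, 2, 3)), which reverses the four floats.
+ *
+ *   __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); // elements 0..3 = 1,2,3,4
+ *   __m128 r = _mm_permute_ps(v, 0x1B);            // r = {4, 3, 2, 1}
+ */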
 
+/// \brief Copies the values in a 256-bit vector of [8 x float] as
+///    specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_permute_ps(__m256 A, const int C);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param A
+///    A 256-bit vector of [8 x float].
+/// \param C
+///    An immediate integer operand specifying how the values are to be copied.
+///    Bits [1:0]:
+///    00: Bits [31:0] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [31:0] of the
+///    returned vector.
+///    Bits [3:2]:
+///    00: Bits [31:0] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [63:32] of the
+///    returned vector.
+///    Bits [5:4]:
+///    00: Bits [31:0] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [95:64] of the
+///    returned vector.
+///    Bits [7:6]:
+///    00: Bits [31:0] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    01: Bits [63:32] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    10: Bits [95:64] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    11: Bits [127:96] of the source are copied to bits [127:96] of the
+///    returned vector.
+///    Bits [1:0]:
+///    00: Bits [159:128] of the source are copied to bits [159:128] of the
+///    returned vector.
+///    01: Bits [191:160] of the source are copied to bits [159:128] of the
+///    returned vector.
+///    10: Bits [223:192] of the source are copied to bits [159:128] of the
+///    returned vector.
+///    11: Bits [255:224] of the source are copied to bits [159:128] of the
+///    returned vector.
+///    Bits [3:2]:
+///    00: Bits [159:128] of the source are copied to bits [191:160] of the
+///    returned vector.
+///    01: Bits [191:160] of the source are copied to bits [191:160] of the
+///    returned vector.
+///    10: Bits [223:192] of the source are copied to bits [191:160] of the
+///    returned vector.
+///    11: Bits [255:224] of the source are copied to bits [191:160] of the
+///    returned vector.
+///    Bits [5:4]:
+///    00: Bits [159:128] of the source are copied to bits [223:192] of the
+///    returned vector.
+///    01: Bits [191:160] of the source are copied to bits [223:192] of the
+///    returned vector.
+///    10: Bits [223:192] of the source are copied to bits [223:192] of the
+///    returned vector.
+///    11: Bits [255:224] of the source are copied to bits [223:192] of the
+///    returned vector.
+///    Bits [7:6]:
+///    00: Bits [159:128] of the source are copied to bits [255:224] of the
+///    returned vector.
+///    01: Bits [191:160] of the source are copied to bits [255:224] of the
+///    returned vector.
+///    10: Bits [223:192] of the source are copied to bits [255:224] of the
+///    returned vector.
+///    11: Bits [255:224] of the source are copied to bits [255:224] of the
+///    returned vector.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
 #define _mm256_permute_ps(A, C) __extension__ ({ \
   (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
-                                  (__v8sf)_mm256_setzero_ps(), \
-                                  (C) & 0x3, ((C) & 0xc) >> 2, \
-                                  ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
-                                  4 + (((C) & 0x03) >> 0), \
-                                  4 + (((C) & 0x0c) >> 2), \
-                                  4 + (((C) & 0x30) >> 4), \
-                                  4 + (((C) & 0xc0) >> 6)); })
+                                  (__v8sf)_mm256_undefined_ps(), \
+                                  0 + (((C) >> 0) & 0x3), \
+                                  0 + (((C) >> 2) & 0x3), \
+                                  0 + (((C) >> 4) & 0x3), \
+                                  0 + (((C) >> 6) & 0x3), \
+                                  4 + (((C) >> 0) & 0x3), \
+                                  4 + (((C) >> 2) & 0x3), \
+                                  4 + (((C) >> 4) & 0x3), \
+                                  4 + (((C) >> 6) & 0x3)); })
 
+/// \brief Permutes 128-bit data values stored in two 256-bit vectors of
+///    [4 x double], as specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_permute2f128_pd(__m256d V1, __m256d V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERM2F128 / PERM2F128 instruction.
+///
+/// \param V1
+///    A 256-bit vector of [4 x double].
+/// \param V2
+///    A 256-bit vector of [4 x double].
+/// \param M
+///    An immediate integer operand specifying how the values are to be
+///    permuted.
+///    Bits [1:0]:
+///    00: Bits [127:0] of operand V1 are copied to bits [127:0] of the
+///    destination.
+///    01: Bits [255:128] of operand V1 are copied to bits [127:0] of the
+///    destination.
+///    10: Bits [127:0] of operand V2 are copied to bits [127:0] of the
+///    destination.
+///    11: Bits [255:128] of operand V2 are copied to bits [127:0] of the
+///    destination.
+///    Bits [5:4]:
+///    00: Bits [127:0] of operand V1 are copied to bits [255:128] of the
+///    destination.
+///    01: Bits [255:128] of operand V1 are copied to bits [255:128] of the
+///    destination.
+///    10: Bits [127:0] of operand V2 are copied to bits [255:128] of the
+///    destination.
+///    11: Bits [255:128] of operand V2 are copied to bits [255:128] of the
+///    destination.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
 #define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
   (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
                                            (__v4df)(__m256d)(V2), (M)); })
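+
+/* Usage sketch (illustrative only; v1 and v2 are hypothetical vectors): the
+ * immediate 0x21 places the high 128 bits of v1 in the low half of the result
+ * and the low 128 bits of v2 in the high half.
+ *
+ *   __m256d r = _mm256_permute2f128_pd(v1, v2, 0x21);
+ */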
 
+/// \brief Permutes 128-bit data values stored in two 256-bit vectors of
+///    [8 x float], as specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_permute2f128_ps(__m256 V1, __m256 V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERM2F128 / PERM2F128 instruction.
+///
+/// \param V1
+///    A 256-bit vector of [8 x float].
+/// \param V2
+///    A 256-bit vector of [8 x float].
+/// \param M
+///    An immediate integer operand specifying how the values are to be
+///    permuted.
+///    Bits [1:0]:
+///    00: Bits [127:0] of operand V1 are copied to bits [127:0] of the
+///    destination.
+///    01: Bits [255:128] of operand V1 are copied to bits [127:0] of the
+///    destination.
+///    10: Bits [127:0] of operand V2 are copied to bits [127:0] of the
+///    destination.
+///    11: Bits [255:128] of operand V2 are copied to bits [127:0] of the
+///    destination.
+///    Bits [5:4]:
+///    00: Bits [127:0] of operand V1 are copied to bits [255:128] of the
+///    destination.
+///    01: Bits [255:128] of operand V1 are copied to bits [255:128] of the
+///    destination.
+///    10: Bits [127:0] of operand V2 are copied to bits [255:128] of the
+///    destination.
+///    11: Bits [255:128] of operand V2 are copied to bits [255:128] of the
+///    destination.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
 #define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
   (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
                                           (__v8sf)(__m256)(V2), (M)); })
 
+/// \brief Permutes 128-bit data values stored in two 256-bit integer vectors,
+///    as specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256i _mm256_permute2f128_si256(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERM2F128 / PERM2F128 instruction.
+///
+/// \param V1
+///    A 256-bit integer vector.
+/// \param V2
+///    A 256-bit integer vector.
+/// \param M
+///    An immediate integer operand specifying how the values are to be copied.
+///    Bits [1:0]:
+///    00: Bits [127:0] of operand V1 are copied to bits [127:0] of the
+///    destination.
+///    01: Bits [255:128] of operand V1 are copied to bits [127:0] of the
+///    destination.
+///    10: Bits [127:0] of operand V2 are copied to bits [127:0] of the
+///    destination.
+///    11: Bits [255:128] of operand V2 are copied to bits [127:0] of the
+///    destination.
+///    Bits [5:4]:
+///    00: Bits [127:0] of operand V1 are copied to bits [255:128] of the
+///    destination.
+///    01: Bits [255:128] of operand V1 are copied to bits [255:128] of the
+///    destination.
+///    10: Bits [127:0] of operand V2 are copied to bits [255:128] of the
+///    destination.
+///    11: Bits [255:128] of operand V2 are copied to bits [255:128] of the
+///    destination.
+/// \returns A 256-bit integer vector containing the copied values.
 #define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
   (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
                                            (__v8si)(__m256i)(V2), (M)); })
 
 /* Vector Blend */
+/// \brief Merges 64-bit double-precision data values stored in either of the
+///    two 256-bit vectors of [4 x double], as specified by the immediate
+///    integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_blend_pd(__m256d V1, __m256d V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VBLENDPD / BLENDPD instruction.
+///
+/// \param V1
+///    A 256-bit vector of [4 x double].
+/// \param V2
+///    A 256-bit vector of [4 x double].
+/// \param M
+///    An immediate integer operand, with mask bits [3:0] specifying how the
+///    values are to be copied. The position of the mask bit corresponds to the
+///    index of a copied value. When a mask bit is 0, the corresponding 64-bit
+///    element in operand V1 is copied to the same position in the destination.
+///    When a mask bit is 1, the corresponding 64-bit element in operand V2 is
+///    copied to the same position in the destination.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
 #define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
   (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
                                    (__v4df)(__m256d)(V2), \
@@ -314,6 +1359,30 @@
                                    (((M) & 0x04) ? 6 : 2), \
                                    (((M) & 0x08) ? 7 : 3)); })
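+
+/* Usage sketch (illustrative only; the values below are hypothetical): the
+ * mask 0xA (binary 1010) takes elements 1 and 3 from b and elements 0 and 2
+ * from a.
+ *
+ *   __m256d a = _mm256_set1_pd(1.0);
+ *   __m256d b = _mm256_set1_pd(2.0);
+ *   __m256d r = _mm256_blend_pd(a, b, 0xA); // r = {1.0, 2.0, 1.0, 2.0}
+ */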
 
+/// \brief Merges 32-bit single-precision data values stored in either of the
+///    two 256-bit vectors of [8 x float], as specified by the immediate
+///    integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_blend_ps(__m256 V1, __m256 V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VBLENDPS / BLENDPS instruction.
+///
+/// \param V1
+///    A 256-bit vector of [8 x float].
+/// \param V2
+///    A 256-bit vector of [8 x float].
+/// \param M
+///    An immediate integer operand, with mask bits [7:0] specifying how the
+///    values are to be copied. The position of the mask bit corresponds to the
+///    index of a copied value. When a mask bit is 0, the corresponding 32-bit
+///    element in operand V1 is copied to the same position in the destination.
+///    When a mask bit is 1, the corresponding 32-bit element in operand V2 is
+///    copied to the same position in the destination.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
 #define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
   (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
                                   (__v8sf)(__m256)(V2), \
@@ -326,6 +1395,27 @@
                                   (((M) & 0x40) ? 14 : 6), \
                                   (((M) & 0x80) ? 15 : 7)); })
 
+/// \brief Merges 64-bit double-precision data values stored in either of the
+///    two 256-bit vectors of [4 x double], as specified by the 256-bit vector
+///    operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VBLENDVPD / BLENDVPD instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double].
+/// \param __b
+///    A 256-bit vector of [4 x double].
+/// \param __c
+///    A 256-bit vector operand, with mask bits 255, 191, 127, and 63 specifying
+///    how the values are to be copied. The position of the mask bit corresponds
+///    to the most significant bit of a copied value. When a mask bit is 0, the
+///    corresponding 64-bit element in operand __a is copied to the same
+///    position in the destination. When a mask bit is 1, the corresponding
+///    64-bit element in operand __b is copied to the same position in the
+///    destination.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
 {
@@ -333,6 +1423,27 @@
     (__v4df)__a, (__v4df)__b, (__v4df)__c);
 }
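+
+/* Usage sketch (illustrative only; x, y, a and b are hypothetical vectors): a
+ * per-element select driven by a comparison mask, whose element sign bits
+ * (bits 63, 127, 191 and 255) choose between a and b.
+ *
+ *   __m256d mask = _mm256_cmp_pd(x, y, _CMP_LT_OQ); // all-1s where x < y
+ *   __m256d r    = _mm256_blendv_pd(a, b, mask);    // b where x < y, else a
+ */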
 
+/// \brief Merges 32-bit single-precision data values stored in either of the
+///    two 256-bit vectors of [8 x float], as specified by the 256-bit vector
+///    operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VBLENDVPS / BLENDVPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float].
+/// \param __b
+///    A 256-bit vector of [8 x float].
+/// \param __c
+///    A 256-bit vector operand, with mask bits 255, 223, 191, 159, 127, 95, 63,
+///    and 31 specifying how the values are to be copied. The position of the
+///    mask bit corresponds to the most significant bit of a copied value. When
+///    a mask bit is 0, the corresponding 32-bit element in operand __a is
+///    copied to the same position in the destination. When a mask bit is 1, the
+///    corresponding 32-bit element in operand __b is copied to the same
+///    position in the destination.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
 {
@@ -341,30 +1452,154 @@
 }
 
 /* Vector Dot Product */
+/// \brief Computes two dot products in parallel, using the lower and upper
+///    halves of two [8 x float] vectors as input to the two computations, and
+///    returning the two dot products in the lower and upper halves of the
+///    [8 x float] result. The immediate integer operand controls which
+///    input elements will contribute to the dot product, and where the final
+///    results are returned. In general, for each dot product, the four
+///    corresponding elements of the input vectors are multiplied; the first
+///    two and second two products are summed, then the two sums are added to
+///    form the final result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_dp_ps(__m256 V1, __m256 V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VDPPS / DPPS instruction.
+///
+/// \param V1
+///    A vector of [8 x float] values, treated as two [4 x float] vectors.
+/// \param V2
+///    A vector of [8 x float] values, treated as two [4 x float] vectors.
+/// \param M
+///    An immediate integer argument. Bits [7:4] determine which elements of
+///    the input vectors are used, with bit [4] corresponding to the lowest
+///    element and bit [7] corresponding to the highest element of each [4 x
+///    float] subvector. If a bit is set, the corresponding elements from the
+///    two input vectors are used as an input for dot product; otherwise that
+///    input is treated as zero. Bits [3:0] determine which elements of the
+///    result will receive a copy of the final dot product, with bit [0]
+///    corresponding to the lowest element and bit [3] corresponding to the
+///    highest element of each [4 x float] subvector. If a bit is set, the dot
+///    product is returned in the corresponding element; otherwise that element
+///    is set to zero. The bitmask is applied in the same way to each of the
+///    two parallel dot product computations.
+/// \returns A 256-bit vector of [8 x float] containing the two dot products.
 #define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
   (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
                                  (__v8sf)(__m256)(V2), (M)); })
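+
+/* Usage sketch (illustrative only; v1 and v2 are hypothetical vectors): two
+ * 4-element dot products at once. With mask 0xF1, bits [7:4] = 0xF use all
+ * four inputs of each half and bits [3:0] = 0x1 place each result in
+ * element 0 of its half.
+ *
+ *   __m256 r  = _mm256_dp_ps(v1, v2, 0xF1);
+ *   float  lo = _mm256_cvtss_f32(r); // dot of the lower halves; the upper
+ *                                    // halves' dot product sits in element 4
+ */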
 
 /* Vector shuffle */
+/// \brief Selects 8 float values from the 256-bit operands of [8 x float], as
+///    specified by the immediate value operand. The four selected elements in
+///    each operand are copied to the destination according to the bits
+///    specified in the immediate operand. The selected elements from the first
+///    256-bit operand are copied to bits [63:0] and bits [191:128] of the
+///    destination, and the selected elements from the second 256-bit operand
+///    are copied to bits [127:64] and bits [255:192] of the destination. For
+///    example, if bits [7:0] of the immediate operand contain a value of 0xFF,
+///    the 256-bit destination vector would contain the following values: b[7],
+///    b[7], a[7], a[7], b[3], b[3], a[3], a[3].
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_shuffle_ps(__m256 a, __m256 b, const int mask);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHUFPS / SHUFPS instruction.
+///
+/// \param a
+///    A 256-bit vector of [8 x float]. The four selected elements in this
+///    operand are copied to bits [63:0] and bits [191:128] in the destination,
+///    according to the bits specified in the immediate operand.
+/// \param b
+///    A 256-bit vector of [8 x float]. The four selected elements in this
+///    operand are copied to bits [127:64] and bits [255:192] in the
+///    destination, according to the bits specified in the immediate operand.
+/// \param mask
+///    An immediate value containing an 8-bit value specifying which elements to
+///    copy from a and b. Bits [3:0] specify the values copied from operand a.
+///    Bits [7:4] specify the values copied from operand b.
+///    The destinations within the 256-bit destination are assigned values as
+///    follows, according to the bit value assignments described below:
+///    Bits [1:0] are used to assign values to bits [31:0] and [159:128] in the
+///    destination.
+///    Bits [3:2] are used to assign values to bits [63:32] and [191:160] in the
+///    destination.
+///    Bits [5:4] are used to assign values to bits [95:64] and [223:192] in the
+///    destination.
+///    Bits [7:6] are used to assign values to bits [127:96] and [255:224] in
+///    the destination.
+///    Bit value assignments:
+///    00: Bits [31:0] and [159:128] are copied from the selected operand.
+///    01: Bits [63:32] and [191:160] are copied from the selected operand.
+///    10: Bits [95:64] and [223:192] are copied from the selected operand.
+///    11: Bits [127:96] and [255:224] are copied from the selected operand.
+/// \returns A 256-bit vector of [8 x float] containing the shuffled values.
 #define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
-        (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
-                                        (__v8sf)(__m256)(b), \
-                                        (mask) & 0x3, \
-                                        ((mask) & 0xc) >> 2, \
-                                        (((mask) & 0x30) >> 4) + 8, \
-                                        (((mask) & 0xc0) >> 6) + 8, \
-                                        ((mask) & 0x3) + 4, \
-                                        (((mask) & 0xc) >> 2) + 4, \
-                                        (((mask) & 0x30) >> 4) + 12, \
-                                        (((mask) & 0xc0) >> 6) + 12); })
+  (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
+                                  (__v8sf)(__m256)(b), \
+                                  0  + (((mask) >> 0) & 0x3), \
+                                  0  + (((mask) >> 2) & 0x3), \
+                                  8  + (((mask) >> 4) & 0x3), \
+                                  8  + (((mask) >> 6) & 0x3), \
+                                  4  + (((mask) >> 0) & 0x3), \
+                                  4  + (((mask) >> 2) & 0x3), \
+                                  12 + (((mask) >> 4) & 0x3), \
+                                  12 + (((mask) >> 6) & 0x3)); })
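+
+/* Usage sketch (illustrative only; a and b are hypothetical vectors): the mask
+ * 0x44 encodes the selectors 0,1,0,1, so each 128-bit lane of the result holds
+ * elements 0,1 of a followed by elements 0,1 of b.
+ *
+ *   __m256 r = _mm256_shuffle_ps(a, b, 0x44);
+ *   // r (low to high) = a0, a1, b0, b1, a4, a5, b4, b5
+ */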
 
+/// \brief Selects four double-precision values from the 256-bit operands of
+///    [4 x double], as specified by the immediate value operand. The selected
+///    elements from the first 256-bit operand are copied to bits [63:0] and
+///    bits [191:128] in the destination, and the selected elements from the
+///    second 256-bit operand are copied to bits [127:64] and bits [255:192] in
+///    the destination. For example, if bits [3:0] of the immediate operand
+///    contain a value of 0xF, the 256-bit destination vector would contain the
+///    following values: b[3], a[3], b[1], a[1].
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_shuffle_pd(__m256d a, __m256d b, const int mask);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHUFPD / SHUFPD instruction.
+///
+/// \param a
+///    A 256-bit vector of [4 x double].
+/// \param b
+///    A 256-bit vector of [4 x double].
+/// \param mask
+///    An immediate value, with bits [3:0] specifying which elements to copy
+///    from a and b:
+///    Bit [0]=0: Bits [63:0] are copied from a to bits [63:0] of the
+///    destination.
+///    Bit [0]=1: Bits [127:64] are copied from a to bits [63:0] of the
+///    destination.
+///    Bit [1]=0: Bits [63:0] are copied from b to bits [127:64] of the
+///    destination.
+///    Bit [1]=1: Bits [127:64] are copied from b to bits [127:64] of the
+///    destination.
+///    Bit [2]=0: Bits [191:128] are copied from a to bits [191:128] of the
+///    destination.
+///    Bit [2]=1: Bits [255:192] are copied from a to bits [191:128] of the
+///    destination.
+///    Bit [3]=0: Bits [191:128] are copied from b to bits [255:192] of the
+///    destination.
+///    Bit [3]=1: Bits [255:192] are copied from b to bits [255:192] of the
+///    destination.
+/// \returns A 256-bit vector of [4 x double] containing the shuffled values.
 #define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
-        (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
-                                         (__v4df)(__m256d)(b), \
-                                         (mask) & 0x1, \
-                                         (((mask) & 0x2) >> 1) + 4, \
-                                         (((mask) & 0x4) >> 2) + 2, \
-                                         (((mask) & 0x8) >> 3) + 6); })
+  (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
+                                   (__v4df)(__m256d)(b), \
+                                   0 + (((mask) >> 0) & 0x1), \
+                                   4 + (((mask) >> 1) & 0x1), \
+                                   2 + (((mask) >> 2) & 0x1), \
+                                   6 + (((mask) >> 3) & 0x1)); })
 
 /* Compare */
 #define _CMP_EQ_OQ    0x00 /* Equal (ordered, non-signaling)  */
@@ -400,30 +1635,235 @@
 #define _CMP_GT_OQ    0x1e /* Greater-than (ordered, non-signaling)  */
 #define _CMP_TRUE_US  0x1f /* True (unordered, signaling)  */
 
+/// \brief Compares each of the corresponding double-precision values of two
+///    128-bit vectors of [2 x double], using the operation specified by the
+///    immediate integer operand. Returns a [2 x double] vector consisting of
+///    two doubles corresponding to the two comparison results: zero if the
+///    comparison is false, and all 1's if the comparison is true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPPD / CMPPD instruction.
+///
+/// \param a
+///    A 128-bit vector of [2 x double].
+/// \param b
+///    A 128-bit vector of [2 x double].
+/// \param c
+///    An immediate integer operand, with bits [4:0] specifying which comparison
+///    operation to use:
+///    00h, 08h, 10h, 18h: Equal
+///    01h, 09h, 11h, 19h: Less than
+///    02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+///                        operands)
+///    03h, 0Bh, 13h, 1Bh: Unordered
+///    04h, 0Ch, 14h, 1Ch: Not equal
+///    05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+///    06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+///                        (swapped operands)
+///    07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 128-bit vector of [2 x double] containing the comparison results.
 #define _mm_cmp_pd(a, b, c) __extension__ ({ \
   (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
                                 (__v2df)(__m128d)(b), (c)); })
 
+/// \brief Compares each of the corresponding values of two 128-bit vectors of
+///    [4 x float], using the operation specified by the immediate integer
+///    operand. Returns a [4 x float] vector consisting of four floats
+///    corresponding to the four comparison results: zero if the comparison is
+///    false, and all 1's if the comparison is true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPPS / CMPPS instruction.
+///
+/// \param a
+///    A 128-bit vector of [4 x float].
+/// \param b
+///    A 128-bit vector of [4 x float].
+/// \param c
+///    An immediate integer operand, with bits [4:0] specifying which comparison
+///    operation to use:
+///    00h, 08h, 10h, 18h: Equal
+///    01h, 09h, 11h, 19h: Less than
+///    02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+///                        operands)
+///    03h, 0Bh, 13h, 1Bh: Unordered
+///    04h, 0Ch, 14h, 1Ch: Not equal
+///    05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+///    06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+///                        (swapped operands)
+///    07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 #define _mm_cmp_ps(a, b, c) __extension__ ({ \
   (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
                                (__v4sf)(__m128)(b), (c)); })
 
+/// \brief Compares each of the corresponding double-precision values of two
+///    256-bit vectors of [4 x double], using the operation specified by the
+///    immediate integer operand. Returns a [4 x double] vector consisting of
+///    four doubles corresponding to the four comparison results: zero if the
+///    comparison is false, and all 1's if the comparison is true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_cmp_pd(__m256d a, __m256d b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPPD / CMPPD instruction.
+///
+/// \param a
+///    A 256-bit vector of [4 x double].
+/// \param b
+///    A 256-bit vector of [4 x double].
+/// \param c
+///    An immediate integer operand, with bits [4:0] specifying which comparison
+///    operation to use:
+///    00h, 08h, 10h, 18h: Equal
+///    01h, 09h, 11h, 19h: Less than
+///    02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+///                        operands)
+///    03h, 0Bh, 13h, 1Bh: Unordered
+///    04h, 0Ch, 14h, 1Ch: Not equal
+///    05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+///    06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+///                        (swapped operands)
+///    07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 256-bit vector of [4 x double] containing the comparison results.
 #define _mm256_cmp_pd(a, b, c) __extension__ ({ \
   (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
                                    (__v4df)(__m256d)(b), (c)); })
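+
+/* Usage sketch (illustrative only; a and b are hypothetical vectors): an
+ * ordered, non-signaling less-than comparison, reduced to one bit per element
+ * with a movemask.
+ *
+ *   __m256d m     = _mm256_cmp_pd(a, b, _CMP_LT_OQ); // all-1s where a < b
+ *   int     lanes = _mm256_movemask_pd(m);           // bit i set when a[i] < b[i]
+ */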
 
+/// \brief Compares each of the corresponding values of two 256-bit vectors of
+///    [8 x float], using the operation specified by the immediate integer
+///    operand. Returns an [8 x float] vector consisting of eight floats
+///    corresponding to the eight comparison results: zero if the comparison is
+///    false, and all 1's if the comparison is true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_cmp_ps(__m256 a, __m256 b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPPS / CMPPS instruction.
+///
+/// \param a
+///    A 256-bit vector of [8 x float].
+/// \param b
+///    A 256-bit vector of [8 x float].
+/// \param c
+///    An immediate integer operand, with bits [4:0] specifying which comparison
+///    operation to use:
+///    00h, 08h, 10h, 18h: Equal
+///    01h, 09h, 11h, 19h: Less than
+///    02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+///                        operands)
+///    03h, 0Bh, 13h, 1Bh: Unordered
+///    04h, 0Ch, 14h, 1Ch: Not equal
+///    05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+///    06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+///                        (swapped operands)
+///    07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 256-bit vector of [8 x float] containing the comparison results.
 #define _mm256_cmp_ps(a, b, c) __extension__ ({ \
   (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
                                   (__v8sf)(__m256)(b), (c)); })
 
+/// \brief Compares each of the corresponding scalar double-precision values of
+///    two 128-bit vectors of [2 x double], using the operation specified by the
+///    immediate integer operand. If the comparison is true, the lower 64 bits
+///    of the destination vector are set to all 1's; otherwise they are
+///    cleared. The upper 64 bits of the destination are copied from operand a.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPSD / CMPSD instruction.
+///
+/// \param a
+///    A 128-bit vector of [2 x double].
+/// \param b
+///    A 128-bit vector of [2 x double].
+/// \param c
+///    An immediate integer operand, with bits [4:0] specifying which comparison
+///    operation to use:
+///    00h, 08h, 10h, 18h: Equal
+///    01h, 09h, 11h, 19h: Less than
+///    02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+///                        operands)
+///    03h, 0Bh, 13h, 1Bh: Unordered
+///    04h, 0Ch, 14h, 1Ch: Not equal
+///    05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+///    06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+///                        (swapped operands)
+///    07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 128-bit vector of [2 x double] containing the comparison results.
 #define _mm_cmp_sd(a, b, c) __extension__ ({ \
   (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
                                 (__v2df)(__m128d)(b), (c)); })
 
+/// \brief Compares each of the corresponding scalar values of two 128-bit
+///    vectors of [4 x float], using the operation specified by the immediate
+///    integer operand. If the comparison is true, the lower 32 bits of the
+///    destination vector are set to all 1's; otherwise they are cleared. Bits
+///    [127:32] of the destination are copied from operand a.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPSS / CMPSS instruction.
+///
+/// \param a
+///    A 128-bit vector of [4 x float].
+/// \param b
+///    A 128-bit vector of [4 x float].
+/// \param c
+///    An immediate integer operand, with bits [4:0] specifying which comparison
+///    operation to use:
+///    00h, 08h, 10h, 18h: Equal
+///    01h, 09h, 11h, 19h: Less than
+///    02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+///                        operands)
+///    03h, 0Bh, 13h, 1Bh: Unordered
+///    04h, 0Ch, 14h, 1Ch: Not equal
+///    05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+///    06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+///                        (swapped operands)
+///    07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 #define _mm_cmp_ss(a, b, c) __extension__ ({ \
   (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
                                (__v4sf)(__m128)(b), (c)); })
 
+/// \brief Takes an [8 x i32] vector and returns the vector element value
+///    indexed by the immediate constant operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
+///    EXTRACTF128+COMPOSITE instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x i32].
+/// \param __imm
+///    An immediate integer operand with bits [2:0] determining which vector
+///    element is extracted and returned.
+/// \returns A 32-bit integer containing the extracted 32 bits of extended
+///    packed data.
 static __inline int __DEFAULT_FN_ATTRS
 _mm256_extract_epi32(__m256i __a, const int __imm)
 {
@@ -431,21 +1871,66 @@
   return __b[__imm & 7];
 }
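+
+/* Usage sketch (illustrative only; the values below are hypothetical):
+ *
+ *   __m256i v = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); // element i holds i
+ *   int     e = _mm256_extract_epi32(v, 5);               // e == 5
+ */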
 
+/// \brief Takes a [16 x i16] vector and returns the vector element value
+///    indexed by the immediate constant operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
+///    EXTRACTF128+COMPOSITE instruction.
+///
+/// \param __a
+///    A 256-bit integer vector of [16 x i16].
+/// \param __imm
+///    An immediate integer operand with bits [3:0] determining which vector
+///    element is extracted and returned.
+/// \returns A 32-bit integer containing the extracted 16 bits of zero extended
+///    packed data.
 static __inline int __DEFAULT_FN_ATTRS
 _mm256_extract_epi16(__m256i __a, const int __imm)
 {
   __v16hi __b = (__v16hi)__a;
-  return __b[__imm & 15];
+  return (unsigned short)__b[__imm & 15];
 }
 
+/// \brief Takes a [32 x i8] vector and returns the vector element value
+///    indexed by the immediate constant operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
+///    EXTRACTF128+COMPOSITE instruction.
+///
+/// \param __a
+///    A 256-bit integer vector of [32 x i8].
+/// \param __imm
+///    An immediate integer operand with bits [4:0] determining which vector
+///    element is extracted and returned.
+/// \returns A 32-bit integer containing the extracted 8 bits of zero extended
+///    packed data.
 static __inline int __DEFAULT_FN_ATTRS
 _mm256_extract_epi8(__m256i __a, const int __imm)
 {
   __v32qi __b = (__v32qi)__a;
-  return __b[__imm & 31];
+  return (unsigned char)__b[__imm & 31];
 }
 
 #ifdef __x86_64__
+/// \brief Takes a [4 x i64] vector and returns the vector element value
+///    indexed by the immediate constant operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
+///    EXTRACTF128+COMPOSITE instruction.
+///
+/// \param __a
+///    A 256-bit integer vector of [4 x i64].
+/// \param __imm
+///    An immediate integer operand with bits [1:0] determining which vector
+///    element is extracted and returned.
+/// \returns A 64-bit integer containing the extracted 64 bits of extended
+///    packed data.
 static __inline long long  __DEFAULT_FN_ATTRS
 _mm256_extract_epi64(__m256i __a, const int __imm)
 {
@@ -454,6 +1939,24 @@
 }
 #endif
 
+/// \brief Takes an [8 x i32] vector and replaces the vector element value
+///    indexed by the immediate constant operand with a new value. Returns the
+///    modified vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
+///    INSERTF128+COMPOSITE instruction.
+///
+/// \param __a
+///    A vector of [8 x i32] to be used by the insert operation.
+/// \param __b
+///    An integer value. The replacement value for the insert operation.
+/// \param __imm
+///    An immediate integer specifying the index of the vector element to be
+///    replaced.
+/// \returns A copy of vector __a, after replacing its element indexed by __imm
+///     with __b.
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_insert_epi32(__m256i __a, int __b, int const __imm)
 {
@@ -462,6 +1965,25 @@
   return (__m256i)__c;
 }
 
+
+/// \brief Takes a [16 x i16] vector and replaces the vector element value
+///    indexed by the immediate constant operand with a new value. Returns the
+///    modified vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
+///    INSERTF128+COMPOSITE instruction.
+///
+/// \param __a
+///    A vector of [16 x i16] to be used by the insert operation.
+/// \param __b
+///    An i16 integer value. The replacement value for the insert operation.
+/// \param __imm
+///    An immediate integer specifying the index of the vector element to be
+///    replaced.
+/// \returns A copy of vector __a, after replacing its element indexed by __imm
+///     with __b.
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_insert_epi16(__m256i __a, int __b, int const __imm)
 {
@@ -470,6 +1992,24 @@
   return (__m256i)__c;
 }
 
+/// \brief Takes a [32 x i8] vector and replaces the vector element value
+///    indexed by the immediate constant operand with a new value. Returns the
+///    modified vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
+///    INSERTF128+COMPOSITE instruction.
+///
+/// \param __a
+///    A vector of [32 x i8] to be used by the insert operation.
+/// \param __b
+///    An i8 integer value. The replacement value for the insert operation.
+/// \param __imm
+///    An immediate integer specifying the index of the vector element to be
+///    replaced.
+/// \returns A copy of vector __a, after replacing its element indexed by __imm
+///    with __b.
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_insert_epi8(__m256i __a, int __b, int const __imm)
 {
@@ -479,6 +2019,24 @@
 }
 
 #ifdef __x86_64__
+/// \brief Takes a [4 x i64] vector and replaces the vector element value
+///    indexed by the immediate constant operand with a new value. Returns the
+///    modified vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
+///    INSERTF128+COMPOSITE instruction.
+///
+/// \param __a
+///    A vector of [4 x i64] to be used by the insert operation.
+/// \param __b
+///    A 64-bit integer value. The replacement value for the insert operation.
+/// \param __imm
+///    An immediate integer specifying the index of the vector element to be
+///    replaced.
+/// \returns A copy of vector __a, after replacing its element indexed by __imm
+///     with __b.
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
 {
@@ -489,24 +2047,61 @@
 #endif
 
 /* Conversion */
+/// \brief Converts a vector of [4 x i32] into a vector of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTDQ2PD / CVTDQ2PD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector of [4 x i32].
+/// \returns A 256-bit vector of [4 x double] containing the converted values.
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_cvtepi32_pd(__m128i __a)
 {
-  return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) __a);
+  return (__m256d)__builtin_convertvector((__v4si)__a, __v4df);
 }
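+
+/* Usage sketch (illustrative only; the values below are hypothetical): widening
+ * four 32-bit integers to four doubles.
+ *
+ *   __m128i i = _mm_set_epi32(4, 3, 2, 1); // elements 0..3 = 1,2,3,4
+ *   __m256d d = _mm256_cvtepi32_pd(i);     // d = {1.0, 2.0, 3.0, 4.0}
+ */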
 
+/// \brief Converts a vector of [8 x i32] into a vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTDQ2PS / CVTDQ2PS instruction.
+///
+/// \param __a
+///    A 256-bit integer vector.
+/// \returns A 256-bit vector of [8 x float] containing the converted values.
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_cvtepi32_ps(__m256i __a)
 {
   return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
 }
 
+/// \brief Converts a 256-bit vector of [4 x double] into a 128-bit vector of
+///    [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTPD2PS / CVTPD2PS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [4 x double].
+/// \returns A 128-bit vector of [4 x float] containing the converted values.
 static __inline __m128 __DEFAULT_FN_ATTRS
 _mm256_cvtpd_ps(__m256d __a)
 {
   return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
 }
 
+/// \brief Converts a vector of [8 x float] into a vector of [8 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTPS2DQ / CVTPS2DQ instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float].
+/// \returns A 256-bit integer vector containing the converted values.
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_cvtps_epi32(__m256 __a)
 {
@@ -516,13 +2111,13 @@
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_cvtps_pd(__m128 __a)
 {
-  return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a);
+  return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);
 }
 
 static __inline __m128i __DEFAULT_FN_ATTRS
 _mm256_cvttpd_epi32(__m256d __a)
 {
-  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
+  return (__m128i)__builtin_convertvector((__v4df) __a, __v4si);
 }
 
 static __inline __m128i __DEFAULT_FN_ATTRS
@@ -534,51 +2129,70 @@
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_cvttps_epi32(__m256 __a)
 {
-  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
+  return (__m256i)__builtin_convertvector((__v8sf) __a, __v8si);
+}
+
+static __inline double __DEFAULT_FN_ATTRS
+_mm256_cvtsd_f64(__m256d __a)
+{
+ return __a[0];
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_cvtsi256_si32(__m256i __a)
+{
+ __v8si __b = (__v8si)__a;
+ return __b[0];
+}
+
+static __inline float __DEFAULT_FN_ATTRS
+_mm256_cvtss_f32(__m256 __a)
+{
+ return __a[0];
 }
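+
+/* Usage sketch for the scalar-extraction helpers above (illustrative only; the
+ * values below are hypothetical):
+ *
+ *   __m256d v = _mm256_set_pd(4.0, 3.0, 2.0, 1.0);
+ *   double  d = _mm256_cvtsd_f64(v);                        // 1.0, the lowest element
+ *   int     i = _mm256_cvtsi256_si32(_mm256_set1_epi32(7)); // 7, the lowest element
+ */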
 
 /* Vector replicate */
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_movehdup_ps(__m256 __a)
 {
-  return __builtin_shufflevector(__a, __a, 1, 1, 3, 3, 5, 5, 7, 7);
+  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);
 }
 
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_moveldup_ps(__m256 __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2, 4, 4, 6, 6);
+  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);
 }
 
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_movedup_pd(__m256d __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
+  return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2);
 }
 
 /* Unpack and Interleave */
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_unpackhi_pd(__m256d __a, __m256d __b)
 {
-  return __builtin_shufflevector(__a, __b, 1, 5, 1+2, 5+2);
+  return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);
 }
 
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_unpacklo_pd(__m256d __a, __m256d __b)
 {
-  return __builtin_shufflevector(__a, __b, 0, 4, 0+2, 4+2);
+  return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);
 }
 
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_unpackhi_ps(__m256 __a, __m256 __b)
 {
-  return __builtin_shufflevector(__a, __b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
+  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
 }
 
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_unpacklo_ps(__m256 __a, __m256 __b)
 {
-  return __builtin_shufflevector(__a, __b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
+  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
 }
 
 /* Bit Test */
@@ -723,13 +2337,13 @@
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_broadcast_pd(__m128d const *__a)
 {
-  return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__a);
+  return (__m256d)__builtin_ia32_vbroadcastf128_pd256((__v2df const *)__a);
 }
 
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_broadcast_ps(__m128 const *__a)
 {
-  return (__m256)__builtin_ia32_vbroadcastf128_ps256(__a);
+  return (__m256)__builtin_ia32_vbroadcastf128_ps256((__v4sf const *)__a);
 }
 
 /* SIMD load ops */
@@ -800,13 +2414,19 @@
 static __inline void __DEFAULT_FN_ATTRS
 _mm256_storeu_pd(double *__p, __m256d __a)
 {
-  __builtin_ia32_storeupd256(__p, (__v4df)__a);
+  struct __storeu_pd {
+    __m256d __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_pd*)__p)->__v = __a;
 }
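+
+/* Usage sketch (illustrative only; buf is a hypothetical buffer): unlike
+ * _mm256_store_pd, the destination does not need 32-byte alignment.
+ *
+ *   double buf[5];
+ *   _mm256_storeu_pd(buf + 1, _mm256_set1_pd(0.0)); // unaligned store of 4 doubles
+ */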
 
 static __inline void __DEFAULT_FN_ATTRS
 _mm256_storeu_ps(float *__p, __m256 __a)
 {
-  __builtin_ia32_storeups256(__p, (__v8sf)__a);
+  struct __storeu_ps {
+    __m256 __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_ps*)__p)->__v = __a;
 }
 
 static __inline void __DEFAULT_FN_ATTRS
@@ -818,7 +2438,10 @@
 static __inline void __DEFAULT_FN_ATTRS
 _mm256_storeu_si256(__m256i *__p, __m256i __a)
 {
-  __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
+  struct __storeu_si256 {
+    __m256i __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_si256*)__p)->__v = __a;
 }
 
 /* Conditional load ops */
@@ -876,36 +2499,36 @@
 static __inline void __DEFAULT_FN_ATTRS
 _mm256_stream_si256(__m256i *__a, __m256i __b)
 {
-  __builtin_ia32_movntdq256((__v4di *)__a, (__v4di)__b);
+  __builtin_nontemporal_store((__v4di)__b, (__v4di*)__a);
 }
 
 static __inline void __DEFAULT_FN_ATTRS
 _mm256_stream_pd(double *__a, __m256d __b)
 {
-  __builtin_ia32_movntpd256(__a, (__v4df)__b);
+  __builtin_nontemporal_store((__v4df)__b, (__v4df*)__a);
 }
 
 static __inline void __DEFAULT_FN_ATTRS
 _mm256_stream_ps(float *__p, __m256 __a)
 {
-  __builtin_ia32_movntps256(__p, (__v8sf)__a);
+  __builtin_nontemporal_store((__v8sf)__a, (__v8sf*)__p);
 }
 
 /* Create vectors */
 static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_undefined_pd()
+_mm256_undefined_pd(void)
 {
   return (__m256d)__builtin_ia32_undef256();
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_undefined_ps()
+_mm256_undefined_ps(void)
 {
   return (__m256)__builtin_ia32_undef256();
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_undefined_si256()
+_mm256_undefined_si256(void)
 {
   return (__m256i)__builtin_ia32_undef256();
 }
@@ -1117,37 +2740,37 @@
 static __inline __m128d __DEFAULT_FN_ATTRS
 _mm256_castpd256_pd128(__m256d __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 1);
+  return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);
 }
 
 static __inline __m128 __DEFAULT_FN_ATTRS
 _mm256_castps256_ps128(__m256 __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
+  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);
 }
 
 static __inline __m128i __DEFAULT_FN_ATTRS
 _mm256_castsi256_si128(__m256i __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 1);
+  return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);
 }
 
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_castpd128_pd256(__m128d __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
 }
 
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_castps128_ps256(__m128 __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
 }
 
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_castsi128_si256(__m128i __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
+  return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1);
 }
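
In the casts above, the -1 shuffle indices mark the widened lanes as undefined, so a 128-to-256 cast does not zero the upper half. A short usage sketch under that assumption (the helper name is illustrative):

  #include <immintrin.h>

  /* Widen a 128-bit vector; the upper 128 bits are undefined after the
     cast, so they are set explicitly before the value is used. */
  __m256 widen_lo(__m128 lo) {
    __m256 v = _mm256_castps128_ps256(lo);            /* upper lane undefined */
    return _mm256_insertf128_ps(v, _mm_setzero_ps(), 1);
  }
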
 
 /*
@@ -1194,7 +2817,7 @@
 #define _mm256_extractf128_ps(V, M) __extension__ ({ \
   (__m128)__builtin_shufflevector( \
     (__v8sf)(__m256)(V), \
-    (__v8sf)(_mm256_setzero_ps()), \
+    (__v8sf)(_mm256_undefined_ps()), \
     (((M) & 1) ? 4 : 0), \
     (((M) & 1) ? 5 : 1), \
     (((M) & 1) ? 6 : 2), \
@@ -1203,14 +2826,14 @@
 #define _mm256_extractf128_pd(V, M) __extension__ ({ \
   (__m128d)__builtin_shufflevector( \
     (__v4df)(__m256d)(V), \
-    (__v4df)(_mm256_setzero_pd()), \
+    (__v4df)(_mm256_undefined_pd()), \
     (((M) & 1) ? 2 : 0), \
     (((M) & 1) ? 3 : 1) );})
 
 #define _mm256_extractf128_si256(V, M) __extension__ ({ \
   (__m128i)__builtin_shufflevector( \
     (__v4di)(__m256i)(V), \
-    (__v4di)(_mm256_setzero_si256()), \
+    (__v4di)(_mm256_undefined_si256()), \
     (((M) & 1) ? 2 : 0), \
     (((M) & 1) ? 3 : 1) );})
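
The extract macros now pass _mm256_undefined_*() as the unused second shuffle operand instead of a zero vector, so no zeroing is materialized. Typical use of the public API, as a sketch:

  #include <immintrin.h>

  /* Split a 256-bit vector into its two 128-bit halves. */
  void split(__m256 v, __m128 *lo, __m128 *hi) {
    *lo = _mm256_castps256_ps128(v);    /* bits [127:0]   */
    *hi = _mm256_extractf128_ps(v, 1);  /* bits [255:128] */
  }
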
 
@@ -1218,35 +2841,22 @@
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
 {
-  struct __loadu_ps {
-    __m128 __v;
-  } __attribute__((__packed__, __may_alias__));
-
-  __m256 __v256 = _mm256_castps128_ps256(((struct __loadu_ps*)__addr_lo)->__v);
-  return _mm256_insertf128_ps(__v256, ((struct __loadu_ps*)__addr_hi)->__v, 1);
+  __m256 __v256 = _mm256_castps128_ps256(_mm_loadu_ps(__addr_lo));
+  return _mm256_insertf128_ps(__v256, _mm_loadu_ps(__addr_hi), 1);
 }
 
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
 {
-  struct __loadu_pd {
-    __m128d __v;
-  } __attribute__((__packed__, __may_alias__));
-
-  __m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v);
-  return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1);
+  __m256d __v256 = _mm256_castpd128_pd256(_mm_loadu_pd(__addr_lo));
+  return _mm256_insertf128_pd(__v256, _mm_loadu_pd(__addr_hi), 1);
 }
 
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
 {
-  struct __loadu_si128 {
-    __m128i __v;
-  } __attribute__((__packed__, __may_alias__));
-  __m256i __v256 = _mm256_castsi128_si256(
-    ((struct __loadu_si128*)__addr_lo)->__v);
-  return _mm256_insertf128_si256(__v256,
-                                 ((struct __loadu_si128*)__addr_hi)->__v, 1);
+  __m256i __v256 = _mm256_castsi128_si256(_mm_loadu_si128(__addr_lo));
+  return _mm256_insertf128_si256(__v256, _mm_loadu_si128(__addr_hi), 1);
 }
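
The loadu2 helpers are now expressed in terms of the public _mm_loadu_* intrinsics rather than local wrapper structs. A sketch of typical usage, assuming two unaligned 4-float rows (the data layout is illustrative):

  #include <immintrin.h>

  /* Gather two unaligned rows of 4 floats into one 256-bit register:
     row0 fills the low half, row1 the high half. */
  __m256 load_rows(const float *row0, const float *row1) {
    return _mm256_loadu2_m128(row1 /* hi */, row0 /* lo */);
  }
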
 
 /* SIMD store ops (unaligned) */
@@ -1256,9 +2866,9 @@
   __m128 __v128;
 
   __v128 = _mm256_castps256_ps128(__a);
-  __builtin_ia32_storeups(__addr_lo, __v128);
+  _mm_storeu_ps(__addr_lo, __v128);
   __v128 = _mm256_extractf128_ps(__a, 1);
-  __builtin_ia32_storeups(__addr_hi, __v128);
+  _mm_storeu_ps(__addr_hi, __v128);
 }
 
 static __inline void __DEFAULT_FN_ATTRS
@@ -1267,9 +2877,9 @@
   __m128d __v128;
 
   __v128 = _mm256_castpd256_pd128(__a);
-  __builtin_ia32_storeupd(__addr_lo, __v128);
+  _mm_storeu_pd(__addr_lo, __v128);
   __v128 = _mm256_extractf128_pd(__a, 1);
-  __builtin_ia32_storeupd(__addr_hi, __v128);
+  _mm_storeu_pd(__addr_hi, __v128);
 }
 
 static __inline void __DEFAULT_FN_ATTRS
@@ -1278,14 +2888,14 @@
   __m128i __v128;
 
   __v128 = _mm256_castsi256_si128(__a);
-  __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
+  _mm_storeu_si128(__addr_lo, __v128);
   __v128 = _mm256_extractf128_si256(__a, 1);
-  __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
+  _mm_storeu_si128(__addr_hi, __v128);
 }
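
The storeu2 helpers are the mirror image, now built on _mm_storeu_* instead of store builtins. A matching sketch (same illustrative layout as above):

  #include <immintrin.h>

  /* Scatter one 256-bit register back to two unaligned 4-float rows. */
  void store_rows(float *row0, float *row1, __m256 v) {
    _mm256_storeu2_m128(row1 /* hi */, row0 /* lo */, v);
  }
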
 
 static __inline __m256 __DEFAULT_FN_ATTRS
 _mm256_set_m128 (__m128 __hi, __m128 __lo) {
-  return (__m256) __builtin_shufflevector(__lo, __hi, 0, 1, 2, 3, 4, 5, 6, 7);
+  return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);
 }
 
 static __inline __m256d __DEFAULT_FN_ATTRS
diff --git a/renderscript/clang-include/bmiintrin.h b/renderscript/clang-include/bmiintrin.h
index da98792..30acfae 100644
--- a/renderscript/clang-include/bmiintrin.h
+++ b/renderscript/clang-include/bmiintrin.h
@@ -28,12 +28,107 @@
 #ifndef __BMIINTRIN_H
 #define __BMIINTRIN_H
 
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned short _tzcnt_u16(unsigned short a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param a
+///    An unsigned 16-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 16-bit integer containing the number of trailing zero
+///    bits in the operand.
 #define _tzcnt_u16(a)     (__tzcnt_u16((a)))
+
+/// \brief Performs a bitwise AND of the second operand with the one's
+///    complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _andn_u32(unsigned int a, unsigned int b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ANDN instruction.
+///
+/// \param a
+///    An unsigned integer containing one of the operands.
+/// \param b
+///    An unsigned integer containing one of the operands.
+/// \returns An unsigned integer containing the bitwise AND of the second
+///    operand with the one's complement of the first operand.
 #define _andn_u32(a, b)   (__andn_u32((a), (b)))
+
 /* _bextr_u32 != __bextr_u32 */
+/// \brief Clears all bits in the source except for the least significant bit
+///    containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _blsi_u32(unsigned int a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSI instruction.
+///
+/// \param a
+///    An unsigned integer whose bits are to be cleared.
+/// \returns An unsigned integer containing the result of clearing the bits from
+///    the source operand.
 #define _blsi_u32(a)      (__blsi_u32((a)))
+
+/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and
+///    including the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _blsmsk_u32(unsigned int a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSMSK instruction.
+///
+/// \param a
+///    An unsigned integer used to create the mask.
+/// \returns An unsigned integer containing the newly created mask.
 #define _blsmsk_u32(a)    (__blsmsk_u32((a)))
+
+/// \brief Clears the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _blsr_u32(unsigned int a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSR instruction.
+///
+/// \param a
+///    An unsigned integer containing the operand to be cleared.
+/// \returns An unsigned integer containing the result of clearing the source
+///    operand.
 #define _blsr_u32(a)      (__blsr_u32((a)))
+
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _tzcnt_u32(unsigned int a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param a
+///    An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of trailing zero
+///    bits in the operand.
 #define _tzcnt_u32(a)     (__tzcnt_u32((a)))
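
The blocks above document the Intel-style macro spellings of the scalar BMI helpers. A short worked example tying them together (the input value is illustrative; assumes a BMI-enabled build, e.g. -mbmi):

  #include <x86intrin.h>

  void bmi_demo(void) {
    unsigned a   = 0x00000058u;      /* binary ...0101 1000            */
    unsigned tz  = _tzcnt_u32(a);    /* 3: three trailing zero bits    */
    unsigned lo  = _blsi_u32(a);     /* 0x08: isolate lowest set bit   */
    unsigned msk = _blsmsk_u32(a);   /* 0x0F: mask through that bit    */
    unsigned clr = _blsr_u32(a);     /* 0x50: clear lowest set bit     */
    (void)tz; (void)lo; (void)msk; (void)clr;
  }
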
 
 /* Define the default attributes for the functions in this file. */
@@ -44,12 +139,35 @@
    to use it as a potentially faster version of BSF. */
 #define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
 
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+///    An unsigned 16-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 16-bit integer containing the number of trailing zero
+///    bits in the operand.
 static __inline__ unsigned short __RELAXED_FN_ATTRS
 __tzcnt_u16(unsigned short __X)
 {
   return __X ? __builtin_ctzs(__X) : 16;
 }
 
+/// \brief Performs a bitwise AND of the second operand with the one's
+///    complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ANDN instruction.
+///
+/// \param __X
+///    An unsigned integer containing one of the operands.
+/// \param __Y
+///    An unsigned integer containing one of the operands.
+/// \returns An unsigned integer containing the bitwise AND of the second
+///    operand with the one's complement of the first operand.
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 __andn_u32(unsigned int __X, unsigned int __Y)
 {
@@ -57,6 +175,21 @@
 }
 
 /* AMD-specified, double-leading-underscore version of BEXTR */
+/// \brief Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BEXTR instruction.
+///
+/// \param __X
+///    An unsigned integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned integer used to specify which bits are extracted. Bits [7:0]
+///    specify the index of the least significant bit. Bits [15:8] specify the
+///    number of bits to be extracted.
+/// \returns An unsigned integer whose least significant bits contain the
+///    extracted bits.
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 __bextr_u32(unsigned int __X, unsigned int __Y)
 {
@@ -64,45 +197,214 @@
 }
 
 /* Intel-specified, single-leading-underscore version of BEXTR */
+/// \brief Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BEXTR instruction.
+///
+/// \param __X
+///    An unsigned integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned integer used to specify the index of the least significant
+///    bit for the bits to be extracted. Bits [7:0] specify the index.
+/// \param __Z
+///    An unsigned integer used to specify the number of bits to be extracted.
+///    Bits [7:0] specify the number of bits.
+/// \returns An unsigned integer whose least significant bits contain the
+///    extracted bits.
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
 {
   return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
 }
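
As the body above shows, _bextr_u32 packs the start index into bits [7:0] and the field length into bits [15:8] of the BEXTR control operand. A small worked example under that encoding (values are illustrative; assumes -mbmi):

  #include <x86intrin.h>

  unsigned extract_field(void) {
    unsigned x = 0x12345678u;
    /* Extract 8 bits starting at bit 4: (x >> 4) & 0xFF == 0x67. */
    return _bextr_u32(x, 4, 8);
  }
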
 
+/// \brief Clears all bits in the source except for the least significant bit
+///    containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSI instruction.
+///
+/// \param __X
+///    An unsigned integer whose bits are to be cleared.
+/// \returns An unsigned integer containing the result of clearing the bits from
+///    the source operand.
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 __blsi_u32(unsigned int __X)
 {
   return __X & -__X;
 }
 
+/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and
+///    including the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSMSK instruction.
+///
+/// \param __X
+///    An unsigned integer used to create the mask.
+/// \returns An unsigned integer containing the newly created mask.
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 __blsmsk_u32(unsigned int __X)
 {
   return __X ^ (__X - 1);
 }
 
+/// \brief Clears the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSR instruction.
+///
+/// \param __X
+///    An unsigned integer containing the operand to be cleared.
+/// \returns An unsigned integer containing the result of clearing the source
+///    operand.
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 __blsr_u32(unsigned int __X)
 {
   return __X & (__X - 1);
 }
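
The fallback C bodies above are the classic two-operation bit tricks: x & -x, x ^ (x - 1), and x & (x - 1). For any nonzero input they are related as sketched below (a check written for illustration only):

  #include <x86intrin.h>

  /* blsi isolates the lowest set bit, blsr clears it, and blsmsk covers
     everything up to and including it, so for nonzero x:
     blsi(x) | blsr(x) == x  and  blsmsk(x) == 2*blsi(x) - 1 (mod 2^32). */
  int bit_trick_identities_hold(unsigned x) {
    if (x == 0) return 1;
    return (__blsi_u32(x) | __blsr_u32(x)) == x
        && __blsmsk_u32(x) == 2u * __blsi_u32(x) - 1u;
  }
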
 
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+///    An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of trailing zero
+///    bits in the operand.
 static __inline__ unsigned int __RELAXED_FN_ATTRS
 __tzcnt_u32(unsigned int __X)
 {
   return __X ? __builtin_ctz(__X) : 32;
 }
 
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+///    An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns A 32-bit integer containing the number of trailing zero
+///    bits in the operand.
+static __inline__ int __RELAXED_FN_ATTRS
+_mm_tzcnt_32(unsigned int __X)
+{
+  return __X ? __builtin_ctz(__X) : 32;
+}
+
 #ifdef __x86_64__
 
+/// \brief Performs a bitwise AND of the second operand with the one's
+///    complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _andn_u64 (unsigned long long a, unsigned long long b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ANDN instruction.
+///
+/// \param a
+///    An unsigned 64-bit integer containing one of the operands.
+/// \param b
+///    An unsigned 64-bit integer containing one of the operands.
+/// \returns An unsigned 64-bit integer containing the bitwise AND of the second
+///    operand with the one's complement of the first operand.
 #define _andn_u64(a, b)   (__andn_u64((a), (b)))
+
 /* _bextr_u64 != __bextr_u64 */
+/// \brief Clears all bits in the source except for the least significant bit
+///    containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _blsi_u64(unsigned long long a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSI instruction.
+///
+/// \param a
+///    An unsigned 64-bit integer whose bits are to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+///    bits from the source operand.
 #define _blsi_u64(a)      (__blsi_u64((a)))
+
+/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and
+///    including the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _blsmsk_u64(unsigned long long a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSMSK instruction.
+///
+/// \param a
+///    An unsigned 64-bit integer used to create the mask.
+/// \returns An unsigned 64-bit integer containing the newly created mask.
 #define _blsmsk_u64(a)    (__blsmsk_u64((a)))
+
+/// \brief Clears the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _blsr_u64(unsigned long long a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSR instruction.
+///
+/// \param a
+///    An unsigned 64-bit integer containing the operand to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+///    source operand.
 #define _blsr_u64(a)      (__blsr_u64((a)))
+
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _tzcnt_u64(unsigned long long a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param a
+///    An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of trailing zero
+///    bits in the operand.
 #define _tzcnt_u64(a)     (__tzcnt_u64((a)))
 
+/// \brief Performs a bitwise AND of the second operand with the one's
+///    complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ANDN instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer containing one of the operands.
+/// \param __Y
+///    An unsigned 64-bit integer containing one of the operands.
+/// \returns An unsigned 64-bit integer containing the bitwise AND of the second
+///    operand with the one's complement of the first operand.
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __andn_u64 (unsigned long long __X, unsigned long long __Y)
 {
@@ -110,6 +412,21 @@
 }
 
 /* AMD-specified, double-leading-underscore version of BEXTR */
+/// \brief Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BEXTR instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned 64-bit integer used to specify which bits are extracted. Bits
+///    [7:0] specify the index of the least significant bit. Bits [15:8] specify
+///    the number of bits to be extracted.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+///    extracted bits.
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __bextr_u64(unsigned long long __X, unsigned long long __Y)
 {
@@ -117,36 +434,112 @@
 }
 
 /* Intel-specified, single-leading-underscore version of BEXTR */
+/// \brief Extracts the specified bits from the first operand and returns them
+///     in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BEXTR instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned integer used to specify the index of the least significant
+///    bit for the bits to be extracted. Bits [7:0] specify the index.
+/// \param __Z
+///    An unsigned integer used to specify the number of bits to be extracted.
+///    Bits [7:0] specify the number of bits.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+///    extracted bits.
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 _bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
 {
   return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
 }
 
+/// \brief Clears all bits in the source except for the least significant bit
+///    containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSI instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose bits are to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+///    bits from the source operand.
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __blsi_u64(unsigned long long __X)
 {
   return __X & -__X;
 }
 
+/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and
+///    including the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSMSK instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer used to create the mask.
+/// \returns An unsigned 64-bit integer containing the newly created mask.
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __blsmsk_u64(unsigned long long __X)
 {
   return __X ^ (__X - 1);
 }
 
+/// \brief Clears the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSR instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer containing the operand to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+///    source operand.
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __blsr_u64(unsigned long long __X)
 {
   return __X & (__X - 1);
 }
 
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of trailing zero
+///    bits in the operand.
 static __inline__ unsigned long long __RELAXED_FN_ATTRS
 __tzcnt_u64(unsigned long long __X)
 {
   return __X ? __builtin_ctzll(__X) : 64;
 }
 
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns A 64-bit integer containing the number of trailing zero
+///    bits in the operand.
+static __inline__ long long __RELAXED_FN_ATTRS
+_mm_tzcnt_64(unsigned long long __X)
+{
+  return __X ? __builtin_ctzll(__X) : 64;
+}
+
 #endif /* __x86_64__ */
 
 #undef __DEFAULT_FN_ATTRS
diff --git a/renderscript/clang-include/clflushoptintrin.h b/renderscript/clang-include/clflushoptintrin.h
new file mode 100644
index 0000000..60e0ead
--- /dev/null
+++ b/renderscript/clang-include/clflushoptintrin.h
@@ -0,0 +1,41 @@
+/*===---- clflushoptintrin.h - CLFLUSHOPT intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <clflushoptintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __CLFLUSHOPTINTRIN_H
+#define __CLFLUSHOPTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__,  __target__("clflushopt")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_clflushopt(char * __m) {
+  __builtin_ia32_clflushopt(__m);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
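
The new header exposes only _mm_clflushopt, which flushes the cache line containing the given address. A hedged usage sketch (the range-flush pattern, 64-byte line size, and -mclflushopt build are assumptions, not part of the header):

  #include <immintrin.h>

  /* Flush every cache line covering buf[0..len) and fence afterwards. */
  void flush_range(char *buf, unsigned long len) {
    for (unsigned long off = 0; off < len; off += 64)  /* 64-byte lines assumed */
      _mm_clflushopt(buf + off);
    _mm_sfence();
  }
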
diff --git a/renderscript/clang-include/cuda_builtin_vars.h b/renderscript/clang-include/cuda_builtin_vars.h
index 901356b..6f5eb9c 100644
--- a/renderscript/clang-include/cuda_builtin_vars.h
+++ b/renderscript/clang-include/cuda_builtin_vars.h
@@ -24,16 +24,20 @@
 #ifndef __CUDA_BUILTIN_VARS_H
 #define __CUDA_BUILTIN_VARS_H
 
+// Forward declares from vector_types.h.
+struct uint3;
+struct dim3;
+
 // The file implements built-in CUDA variables using __declspec(property).
 // https://msdn.microsoft.com/en-us/library/yhfk0thd.aspx
 // All read accesses of built-in variable fields get converted into calls to a
-// getter function which in turn would call appropriate builtin to fetch the
+// getter function which in turn calls the appropriate builtin to fetch the
 // value.
 //
 // Example:
 //    int x = threadIdx.x;
 // IR output:
-//  %0 = call i32 @llvm.ptx.read.tid.x() #3
+//  %0 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() #3
 // PTX output:
 //  mov.u32     %r2, %tid.x;
 
@@ -60,33 +64,45 @@
   __attribute__((device)) TypeName *operator&() const __DELETE
 
 struct __cuda_builtin_threadIdx_t {
-  __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_tid_x());
-  __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_tid_y());
-  __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_tid_z());
+  __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_tid_x());
+  __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_tid_y());
+  __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_tid_z());
+  // threadIdx should be convertible to uint3 (in fact in nvcc, it *is* a
+  // uint3).  This function is defined after we pull in vector_types.h.
+  __attribute__((device)) operator uint3() const;
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t);
 };
 
 struct __cuda_builtin_blockIdx_t {
-  __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_ctaid_x());
-  __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_ctaid_y());
-  __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_ctaid_z());
+  __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ctaid_x());
+  __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ctaid_y());
+  __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ctaid_z());
+  // blockIdx should be convertible to uint3 (in fact in nvcc, it *is* a
+  // uint3).  This function is defined after we pull in vector_types.h.
+  __attribute__((device)) operator uint3() const;
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t);
 };
 
 struct __cuda_builtin_blockDim_t {
-  __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_ntid_x());
-  __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_ntid_y());
-  __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_ntid_z());
+  __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ntid_x());
+  __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ntid_y());
+  __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ntid_z());
+  // blockDim should be convertible to dim3 (in fact in nvcc, it *is* a
+  // dim3).  This function is defined after we pull in vector_types.h.
+  __attribute__((device)) operator dim3() const;
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t);
 };
 
 struct __cuda_builtin_gridDim_t {
-  __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_nctaid_x());
-  __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_nctaid_y());
-  __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_nctaid_z());
+  __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_nctaid_x());
+  __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_nctaid_y());
+  __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_nctaid_z());
+  // gridDim should be convertible to dim3 (in fact in nvcc, it *is* a
+  // dim3).  This function is defined after we pull in vector_types.h.
+  __attribute__((device)) operator dim3() const;
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t);
 };
diff --git a/renderscript/clang-include/emmintrin.h b/renderscript/clang-include/emmintrin.h
index cfc2c71..c78d059 100644
--- a/renderscript/clang-include/emmintrin.h
+++ b/renderscript/clang-include/emmintrin.h
@@ -35,6 +35,11 @@
 typedef short __v8hi __attribute__((__vector_size__(16)));
 typedef char __v16qi __attribute__((__vector_size__(16)));
 
+/* Unsigned types */
+typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
+typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
+typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
+
 /* We need an explicitly signed variant for char. Note that this shouldn't
  * appear in the interface though. */
 typedef signed char __v16qs __attribute__((__vector_size__(16)));
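
The new __v2du/__v8hu/__v16qu typedefs let the arithmetic intrinsics later in this file be written as unsigned lane-wise operations, where wraparound is well defined, instead of signed operations whose overflow would be undefined behavior. A sketch of the distinction on plain vector types (the typedef names mirror the header but are re-declared here purely for illustration):

  typedef short          v8hi __attribute__((__vector_size__(16)));
  typedef unsigned short v8hu __attribute__((__vector_size__(16)));

  /* Adding as unsigned lanes wraps modulo 2^16, matching PADDW semantics. */
  v8hi add_lanes(v8hi a, v8hi b) {
    return (v8hi)((v8hu)a + (v8hu)b);
  }
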
@@ -54,7 +59,7 @@
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_add_pd(__m128d __a, __m128d __b)
 {
-  return __a + __b;
+  return (__m128d)((__v2df)__a + (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -67,7 +72,7 @@
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_sub_pd(__m128d __a, __m128d __b)
 {
-  return __a - __b;
+  return (__m128d)((__v2df)__a - (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -80,7 +85,7 @@
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mul_pd(__m128d __a, __m128d __b)
 {
-  return __a * __b;
+  return (__m128d)((__v2df)__a * (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -93,318 +98,320 @@
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_div_pd(__m128d __a, __m128d __b)
 {
-  return __a / __b;
+  return (__m128d)((__v2df)__a / (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_sqrt_sd(__m128d __a, __m128d __b)
 {
-  __m128d __c = __builtin_ia32_sqrtsd(__b);
+  __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
   return (__m128d) { __c[0], __a[1] };
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_sqrt_pd(__m128d __a)
 {
-  return __builtin_ia32_sqrtpd(__a);
+  return __builtin_ia32_sqrtpd((__v2df)__a);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_min_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_minsd(__a, __b);
+  return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_min_pd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_minpd(__a, __b);
+  return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_max_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_maxsd(__a, __b);
+  return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_max_pd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_maxpd(__a, __b);
+  return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_and_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)((__v4si)__a & (__v4si)__b);
+  return (__m128d)((__v4su)__a & (__v4su)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_andnot_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)(~(__v4si)__a & (__v4si)__b);
+  return (__m128d)(~(__v4su)__a & (__v4su)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_or_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)((__v4si)__a | (__v4si)__b);
+  return (__m128d)((__v4su)__a | (__v4su)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_xor_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)((__v4si)__a ^ (__v4si)__b);
+  return (__m128d)((__v4su)__a ^ (__v4su)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpeq_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpeqpd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmplt_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpltpd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmple_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmplepd(__a, __b);
+  return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpgt_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpltpd(__b, __a);
+  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpge_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmplepd(__b, __a);
+  return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpord_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpordpd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpunord_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpunordpd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpneq_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpneqpd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpnlt_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpnltpd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpnle_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpnlepd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpngt_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpnltpd(__b, __a);
+  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpnge_pd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpnlepd(__b, __a);
+  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpeq_sd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpeqsd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmplt_sd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpltsd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmple_sd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmplesd(__a, __b);
+  return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpgt_sd(__m128d __a, __m128d __b)
 {
-  __m128d __c = __builtin_ia32_cmpltsd(__b, __a);
+  __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
   return (__m128d) { __c[0], __a[1] };
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpge_sd(__m128d __a, __m128d __b)
 {
-  __m128d __c = __builtin_ia32_cmplesd(__b, __a);
+  __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
   return (__m128d) { __c[0], __a[1] };
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpord_sd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpordsd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpunord_sd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpunordsd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpneq_sd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpneqsd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpnlt_sd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpnltsd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpnle_sd(__m128d __a, __m128d __b)
 {
-  return (__m128d)__builtin_ia32_cmpnlesd(__a, __b);
+  return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpngt_sd(__m128d __a, __m128d __b)
 {
-  __m128d __c = __builtin_ia32_cmpnltsd(__b, __a);
+  __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
   return (__m128d) { __c[0], __a[1] };
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cmpnge_sd(__m128d __a, __m128d __b)
 {
-  __m128d __c = __builtin_ia32_cmpnlesd(__b, __a);
+  __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
   return (__m128d) { __c[0], __a[1] };
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comieq_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_comisdeq(__a, __b);
+  return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comilt_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_comisdlt(__a, __b);
+  return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comile_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_comisdle(__a, __b);
+  return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comigt_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_comisdgt(__a, __b);
+  return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comige_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_comisdge(__a, __b);
+  return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comineq_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_comisdneq(__a, __b);
+  return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomieq_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_ucomisdeq(__a, __b);
+  return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomilt_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_ucomisdlt(__a, __b);
+  return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomile_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_ucomisdle(__a, __b);
+  return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomigt_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_ucomisdgt(__a, __b);
+  return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomige_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_ucomisdge(__a, __b);
+  return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomineq_sd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_ucomisdneq(__a, __b);
+  return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvtpd_ps(__m128d __a)
 {
-  return __builtin_ia32_cvtpd2ps(__a);
+  return __builtin_ia32_cvtpd2ps((__v2df)__a);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cvtps_pd(__m128 __a)
 {
-  return __builtin_ia32_cvtps2pd(__a);
+  return (__m128d) __builtin_convertvector(
+      __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cvtepi32_pd(__m128i __a)
 {
-  return __builtin_ia32_cvtdq2pd((__v4si)__a);
+  return (__m128d) __builtin_convertvector(
+      __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvtpd_epi32(__m128d __a)
 {
-  return __builtin_ia32_cvtpd2dq(__a);
+  return __builtin_ia32_cvtpd2dq((__v2df)__a);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_cvtsd_si32(__m128d __a)
 {
-  return __builtin_ia32_cvtsd2si(__a);
+  return __builtin_ia32_cvtsd2si((__v2df)__a);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -431,7 +438,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvttpd_epi32(__m128d __a)
 {
-  return (__m128i)__builtin_ia32_cvttpd2dq(__a);
+  return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
@@ -443,13 +450,13 @@
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cvtpd_pi32(__m128d __a)
 {
-  return (__m64)__builtin_ia32_cvtpd2pi(__a);
+  return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
 }
 
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cvttpd_pi32(__m128d __a)
 {
-  return (__m64)__builtin_ia32_cvttpd2pi(__a);
+  return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -486,7 +493,7 @@
 _mm_loadr_pd(double const *__dp)
 {
   __m128d __u = *(__m128d*)__dp;
-  return __builtin_shufflevector(__u, __u, 1, 0);
+  return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -498,6 +505,16 @@
   return ((struct __loadu_pd*)__dp)->__v;
 }
 
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadu_si64(void const *__a)
+{
+  struct __loadu_si64 {
+    long long __v;
+  } __attribute__((__packed__, __may_alias__));
+  long long __u = ((struct __loadu_si64*)__a)->__v;
+  return (__m128i){__u, 0L};
+}
+
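
_mm_loadu_si64 is new in this drop: it reads 8 bytes from a possibly unaligned address into the low half of a 128-bit vector and zeroes the upper half. A small usage sketch (the helper and its inputs are illustrative):

  #include <emmintrin.h>

  /* Load two unaligned 64-bit blocks and add them as packed 32-bit ints. */
  __m128i sum_halves(const void *p, const void *q) {
    __m128i lo = _mm_loadu_si64(p);   /* upper 64 bits zeroed */
    __m128i hi = _mm_loadu_si64(q);
    return _mm_add_epi32(lo, hi);
  }
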
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_load_sd(double const *__dp)
 {
@@ -529,7 +546,7 @@
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_undefined_pd()
+_mm_undefined_pd(void)
 {
   return (__m128d)__builtin_ia32_undef128();
 }
@@ -580,31 +597,37 @@
 }
 
 static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_pd(double *__dp, __m128d __a)
+_mm_store_pd(double *__dp, __m128d __a)
 {
-  struct __mm_store1_pd_struct {
-    double __u[2];
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_store1_pd_struct*)__dp)->__u[0] = __a[0];
-  ((struct __mm_store1_pd_struct*)__dp)->__u[1] = __a[0];
+  *(__m128d*)__dp = __a;
 }
 
 static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd(double *__dp, __m128d __a)
+_mm_store1_pd(double *__dp, __m128d __a)
 {
-  *(__m128d *)__dp = __a;
+  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
+  _mm_store_pd(__dp, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_pd1(double *__dp, __m128d __a)
+{
+  return _mm_store1_pd(__dp, __a);
 }
 
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_storeu_pd(double *__dp, __m128d __a)
 {
-  __builtin_ia32_storeupd(__dp, __a);
+  struct __storeu_pd {
+    __m128d __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_pd*)__dp)->__v = __a;
 }
 
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_storer_pd(double *__dp, __m128d __a)
 {
-  __a = __builtin_shufflevector(__a, __a, 1, 0);
+  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
   *(__m128d *)__dp = __a;
 }
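
_mm_store1_pd is now a lane-0 splat followed by an aligned _mm_store_pd, a _mm_store_pd1 alias is added, and _mm_storeu_pd switches to the packed/__may_alias__ struct store. A short comparison of the three from the caller's side (alignment of the pointers is the caller's responsibility):

  #include <emmintrin.h>

  void store_variants(double *aligned16, double *unaligned, __m128d v) {
    _mm_store_pd(aligned16, v);    /* both lanes, 16-byte aligned address */
    _mm_store1_pd(aligned16, v);   /* lane 0 duplicated into both slots   */
    _mm_storeu_pd(unaligned, v);   /* both lanes, no alignment required   */
  }
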
 
@@ -629,31 +652,31 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_add_epi8(__m128i __a, __m128i __b)
 {
-  return (__m128i)((__v16qi)__a + (__v16qi)__b);
+  return (__m128i)((__v16qu)__a + (__v16qu)__b);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_add_epi16(__m128i __a, __m128i __b)
 {
-  return (__m128i)((__v8hi)__a + (__v8hi)__b);
+  return (__m128i)((__v8hu)__a + (__v8hu)__b);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_add_epi32(__m128i __a, __m128i __b)
 {
-  return (__m128i)((__v4si)__a + (__v4si)__b);
+  return (__m128i)((__v4su)__a + (__v4su)__b);
 }
 
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_add_si64(__m64 __a, __m64 __b)
 {
-  return (__m64)__builtin_ia32_paddq(__a, __b);
+  return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_add_epi64(__m128i __a, __m128i __b)
 {
-  return __a + __b;
+  return (__m128i)((__v2du)__a + (__v2du)__b);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -734,268 +757,792 @@
   return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
 }
 
+/// \brief Multiplies the corresponding elements of two [8 x short] vectors and
+///    returns a vector containing the low-order 16 bits of each 32-bit product
+///    in the corresponding element.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULLW / PMULLW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the products of both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_mullo_epi16(__m128i __a, __m128i __b)
 {
-  return (__m128i)((__v8hi)__a * (__v8hi)__b);
+  return (__m128i)((__v8hu)__a * (__v8hu)__b);
 }
 
+/// \brief Multiplies 32-bit unsigned integer values contained in the lower bits
+///    of the two 64-bit integer vectors and returns the 64-bit unsigned
+///    product.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMULUDQ instruction.
+///
+/// \param __a
+///    A 64-bit integer containing one of the source operands.
+/// \param __b
+///    A 64-bit integer containing one of the source operands.
+/// \returns A 64-bit integer vector containing the product of both operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_mul_su32(__m64 __a, __m64 __b)
 {
   return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
 }
 
+/// \brief Multiplies 32-bit unsigned integer values contained in the lower
+///    bits of the corresponding elements of two [2 x i64] vectors, and returns
+///    the 64-bit products in the corresponding elements of a [2 x i64] vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULUDQ / PMULUDQ instruction.
+///
+/// \param __a
+///    A [2 x i64] vector containing one of the source operands.
+/// \param __b
+///    A [2 x i64] vector containing one of the source operands.
+/// \returns A [2 x i64] vector containing the product of both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_mul_epu32(__m128i __a, __m128i __b)
 {
   return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
 }
 
+/// \brief Computes the absolute differences of corresponding 8-bit integer
+///    values in two 128-bit vectors. Sums the first 8 absolute differences, and
+///    separately sums the second 8 absolute differences. Packs these two
+///    unsigned 16-bit integer sums into the upper and lower elements of a
+///    [2 x i64] vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSADBW / PSADBW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 128-bit integer vector containing one of the source operands.
+/// \returns A [2 x i64] vector containing the sums of the sets of absolute
+///    differences between both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sad_epu8(__m128i __a, __m128i __b)
 {
   return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
 }
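
The _mm_sad_epu8 documentation above describes PSADBW, which leaves one partial sum in the low 16 bits of each 64-bit half. A hedged sketch of the common use, a 16-byte sum of absolute differences (the buffers are illustrative):

  #include <emmintrin.h>

  unsigned sad16(const unsigned char *a, const unsigned char *b) {
    __m128i va = _mm_loadu_si128((const __m128i *)a);
    __m128i vb = _mm_loadu_si128((const __m128i *)b);
    __m128i s  = _mm_sad_epu8(va, vb);
    /* Add the two 16-bit partial sums held in bits [15:0] and [79:64]. */
    return (unsigned)_mm_cvtsi128_si32(s)
         + (unsigned)_mm_extract_epi16(s, 4);
  }
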
 
+/// \brief Subtracts the corresponding 8-bit integer values in the operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBB / PSUBB instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the minuends.
+/// \param __b
+///    A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+///    in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sub_epi8(__m128i __a, __m128i __b)
 {
-  return (__m128i)((__v16qi)__a - (__v16qi)__b);
+  return (__m128i)((__v16qu)__a - (__v16qu)__b);
 }
 
+/// \brief Subtracts the corresponding 16-bit integer values in the operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBW / PSUBW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the minuends.
+/// \param __b
+///    A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+///    in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sub_epi16(__m128i __a, __m128i __b)
 {
-  return (__m128i)((__v8hi)__a - (__v8hi)__b);
+  return (__m128i)((__v8hu)__a - (__v8hu)__b);
 }
 
+/// \brief Subtracts the corresponding 32-bit integer values in the operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBD / PSUBD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the minuends.
+/// \param __b
+///    A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+///    in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sub_epi32(__m128i __a, __m128i __b)
 {
-  return (__m128i)((__v4si)__a - (__v4si)__b);
+  return (__m128i)((__v4su)__a - (__v4su)__b);
 }
 
+/// \brief Subtracts signed or unsigned 64-bit integer values and writes the
+///    difference to the corresponding bits in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBQ instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing the minuend.
+/// \param __b
+///    A 64-bit integer vector containing the subtrahend.
+/// \returns A 64-bit integer vector containing the difference of the values in
+///    the operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sub_si64(__m64 __a, __m64 __b)
 {
-  return (__m64)__builtin_ia32_psubq(__a, __b);
+  return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
 }
 
+/// \brief Subtracts the corresponding elements of two [2 x i64] vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBQ / PSUBQ instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the minuends.
+/// \param __b
+///    A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+///    in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sub_epi64(__m128i __a, __m128i __b)
 {
-  return __a - __b;
+  return (__m128i)((__v2du)__a - (__v2du)__b);
 }
 
+/// \brief Subtracts corresponding 8-bit signed integer values in the input and
+///    returns the differences in the corresponding bytes in the destination.
+///    Differences greater than 7Fh are saturated to 7Fh, and differences less
+///    than 80h are saturated to 80h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBSB / PSUBSB instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the minuends.
+/// \param __b
+///    A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+///    in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_subs_epi8(__m128i __a, __m128i __b)
 {
   return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
 }
 
+/// \brief Subtracts corresponding 16-bit signed integer values in the input and
+///    returns the differences in the corresponding words in the destination.
+///    Differences greater than 7FFFh are saturated to 7FFFh, and values less
+///    than 8000h are saturated to 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBSW / PSUBSW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the minuends.
+/// \param __b
+///    A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+///    in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_subs_epi16(__m128i __a, __m128i __b)
 {
   return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
 }
 
+/// \brief Subtracts corresponding 8-bit unsigned integer values in the input
+///    and returns the differences in the corresponding bytes in the
+///    destination. Differences less than 00h are saturated to 00h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBUSB / PSUBUSB instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the minuends.
+/// \param __b
+///    A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the unsigned integer
+///    differences of the values in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_subs_epu8(__m128i __a, __m128i __b)
 {
   return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
 }
 
+/// \brief Subtracts corresponding 16-bit unsigned integer values in the input
+///    and returns the differences in the corresponding words in the
+///    destination. Differences less than 0000h are saturated to 0000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBUSW / PSUBUSW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the minuends.
+/// \param __b
+///    A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the unsigned integer
+///    differences of the values in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_subs_epu16(__m128i __a, __m128i __b)
 {
   return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
 }
 
+/// \brief Performs a bitwise AND of two 128-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPAND / PAND instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the bitwise AND of the values
+///    in both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_and_si128(__m128i __a, __m128i __b)
 {
-  return __a & __b;
+  return (__m128i)((__v2du)__a & (__v2du)__b);
 }
 
+/// \brief Performs a bitwise AND of two 128-bit integer vectors, using the
+///    one's complement of the values contained in the first source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPANDN / PANDN instruction.
+///
+/// \param __a
+///    A 128-bit vector containing the left source operand. The one's complement
+///    of this value is used in the bitwise AND.
+/// \param __b
+///    A 128-bit vector containing the right source operand.
+/// \returns A 128-bit integer vector containing the bitwise AND of the one's
+///    complement of the first operand and the values in the second operand.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_andnot_si128(__m128i __a, __m128i __b)
 {
-  return ~__a & __b;
+  return (__m128i)(~(__v2du)__a & (__v2du)__b);
 }
-
+/// \brief Performs a bitwise OR of two 128-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPOR / POR instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the bitwise OR of the values
+///    in both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_or_si128(__m128i __a, __m128i __b)
 {
-  return __a | __b;
+  return (__m128i)((__v2du)__a | (__v2du)__b);
 }
 
+/// \brief Performs a bitwise exclusive OR of two 128-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPXOR / PXOR instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
+///    values in both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_xor_si128(__m128i __a, __m128i __b)
 {
-  return __a ^ __b;
+  return (__m128i)((__v2du)__a ^ (__v2du)__b);
 }
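A rough sketch of how the four bitwise intrinsics above are typically combined (illustrative only; the helper name is hypothetical). Note that _mm_andnot_si128 complements its first argument:

#include <emmintrin.h>

/* Keep the bits of value where mask is 0; equivalent to (~mask) & value. */
static __m128i example_mask_out(__m128i value, __m128i mask)
{
  return _mm_andnot_si128(mask, value);
}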
 
-#define _mm_slli_si128(a, imm) __extension__ ({                         \
-  (__m128i)__builtin_shufflevector((__v16qi)_mm_setzero_si128(),        \
-                                   (__v16qi)(__m128i)(a),               \
-                                   ((imm)&0xF0) ? 0 : 16 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 17 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 18 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 19 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 20 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 21 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 22 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 23 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 24 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 25 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 26 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 27 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 28 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 29 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 30 - ((imm)&0xF), \
-                                   ((imm)&0xF0) ? 0 : 31 - ((imm)&0xF)); })
+/// \brief Left-shifts the 128-bit integer vector operand by the specified
+///    number of bytes. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_slli_si128(__m128i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSLLDQ / PSLLDQ instruction.
+///
+/// \param a
+///    A 128-bit integer vector containing the source operand.
+/// \param imm
+///    An immediate value specifying the number of bytes to left-shift
+///    operand a.
+/// \returns A 128-bit integer vector containing the left-shifted value.
+#define _mm_slli_si128(a, imm) __extension__ ({                              \
+  (__m128i)__builtin_shufflevector(                                          \
+                                 (__v16qi)_mm_setzero_si128(),               \
+                                 (__v16qi)(__m128i)(a),                      \
+                                 ((char)(imm)&0xF0) ?  0 : 16 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ?  1 : 17 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ?  2 : 18 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ?  3 : 19 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ?  4 : 20 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ?  5 : 21 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ?  6 : 22 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ?  7 : 23 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ?  8 : 24 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ?  9 : 25 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ? 10 : 26 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ? 11 : 27 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ? 12 : 28 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ? 13 : 29 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ? 14 : 30 - (char)(imm), \
+                                 ((char)(imm)&0xF0) ? 15 : 31 - (char)(imm)); })
 
 #define _mm_bslli_si128(a, imm) \
   _mm_slli_si128((a), (imm))
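A short sketch of the byte-wise shift above (illustrative only; the function name is hypothetical). The count is in bytes, not bits, and any count of 16 or more yields all zeros, which is what the (imm)&0xF0 tests implement:

#include <emmintrin.h>

static __m128i example_shift_left_two_bytes(__m128i v)
{
  return _mm_slli_si128(v, 2);   /* bytes move toward the high end; the low 2 bytes become 0 */
}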
 
+/// \brief Left-shifts each 16-bit value in the 128-bit integer vector operand
+///    by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLW / PSLLW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    An integer value specifying the number of bits to left-shift each value
+///    in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_slli_epi16(__m128i __a, int __count)
 {
   return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
 }
 
+/// \brief Left-shifts each 16-bit value in the 128-bit integer vector operand
+///    by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLW / PSLLW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    A 128-bit integer vector in which bits [63:0] specify the number of bits
+///    to left-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sll_epi16(__m128i __a, __m128i __count)
 {
   return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
 }
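A sketch contrasting the immediate-count and vector-count shift forms above (illustrative only; the helper name is hypothetical, and it uses _mm_cvtsi32_si128 from later in this header):

#include <emmintrin.h>

static __m128i example_shift_each_word_by_three(__m128i v)
{
  __m128i count = _mm_cvtsi32_si128(3);   /* the count is read from bits [63:0] */
  return _mm_sll_epi16(v, count);         /* same result as _mm_slli_epi16(v, 3) */
}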
 
+/// \brief Left-shifts each 32-bit value in the 128-bit integer vector operand
+///    by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLD / PSLLD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    An integer value specifying the number of bits to left-shift each value
+///    in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_slli_epi32(__m128i __a, int __count)
 {
   return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
 }
 
+/// \brief Left-shifts each 32-bit value in the 128-bit integer vector operand
+///    by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLD / PSLLD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    A 128-bit integer vector in which bits [63:0] specify the number of bits
+///    to left-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sll_epi32(__m128i __a, __m128i __count)
 {
   return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
 }
 
+/// \brief Left-shifts each 64-bit value in the 128-bit integer vector operand
+///    by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLQ / PSLLQ instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    An integer value specifying the number of bits to left-shift each value
+///    in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_slli_epi64(__m128i __a, int __count)
 {
-  return __builtin_ia32_psllqi128(__a, __count);
+  return __builtin_ia32_psllqi128((__v2di)__a, __count);
 }
 
+/// \brief Left-shifts each 64-bit value in the 128-bit integer vector operand
+///    by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLQ / PSLLQ instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    A 128-bit integer vector in which bits [63:0] specify the number of bits
+///    to left-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sll_epi64(__m128i __a, __m128i __count)
 {
-  return __builtin_ia32_psllq128(__a, __count);
+  return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
 }
 
+/// \brief Right-shifts each 16-bit value in the 128-bit integer vector operand
+///    by the specified number of bits. High-order bits are filled with the sign
+///    bit of the initial value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAW / PSRAW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    An integer value specifying the number of bits to right-shift each value
+///    in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_srai_epi16(__m128i __a, int __count)
 {
   return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
 }
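A sketch of the arithmetic shift above (illustrative only; helper name hypothetical). Unlike the logical right shifts further below, the sign bit is replicated:

#include <emmintrin.h>

static __m128i example_halve_signed_words(__m128i v)
{
  return _mm_srai_epi16(v, 1);   /* 6 -> 3, -6 -> -3; odd negatives round toward -infinity */
}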
 
+/// \brief Right-shifts each 16-bit value in the 128-bit integer vector operand
+///    by the specified number of bits. High-order bits are filled with the sign
+///    bit of the initial value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAW / PSRAW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    A 128-bit integer vector in which bits [63:0] specify the number of bits
+///    to right-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sra_epi16(__m128i __a, __m128i __count)
 {
   return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
 }
 
+/// \brief Right-shifts each 32-bit value in the 128-bit integer vector operand
+///    by the specified number of bits. High-order bits are filled with the sign
+///    bit of the initial value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAD / PSRAD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    An integer value specifying the number of bits to right-shift each value
+///    in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_srai_epi32(__m128i __a, int __count)
 {
   return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
 }
 
+/// \brief Right-shifts each 32-bit value in the 128-bit integer vector operand
+///    by the specified number of bits. High-order bits are filled with the sign
+///    bit of the initial value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAD / PSRAD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    A 128-bit integer vector in which bits [63:0] specify the number of bits
+///    to right-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sra_epi32(__m128i __a, __m128i __count)
 {
   return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
 }
 
-#define _mm_srli_si128(a, imm) __extension__ ({                          \
-  (__m128i)__builtin_shufflevector((__v16qi)(__m128i)(a),                \
-                                   (__v16qi)_mm_setzero_si128(),         \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 0,  \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 1,  \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 2,  \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 3,  \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 4,  \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 5,  \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 6,  \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 7,  \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 8,  \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 9,  \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 10, \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 11, \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 12, \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 13, \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 14, \
-                                   ((imm)&0xF0) ? 16 : ((imm)&0xF) + 15); })
+/// \brief Right-shifts the 128-bit integer vector operand by the specified
+///    number of bytes. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_srli_si128(__m128i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSRLDQ / PSRLDQ instruction.
+///
+/// \param a
+///    A 128-bit integer vector containing the source operand.
+/// \param imm
+///    An immediate value specifying the number of bytes to right-shift operand
+///    a.
+/// \returns A 128-bit integer vector containing the right-shifted value.
+#define _mm_srli_si128(a, imm) __extension__ ({                              \
+  (__m128i)__builtin_shufflevector(                                          \
+                                 (__v16qi)(__m128i)(a),                      \
+                                 (__v16qi)_mm_setzero_si128(),               \
+                                 ((char)(imm)&0xF0) ? 16 : (char)(imm) + 0,  \
+                                 ((char)(imm)&0xF0) ? 17 : (char)(imm) + 1,  \
+                                 ((char)(imm)&0xF0) ? 18 : (char)(imm) + 2,  \
+                                 ((char)(imm)&0xF0) ? 19 : (char)(imm) + 3,  \
+                                 ((char)(imm)&0xF0) ? 20 : (char)(imm) + 4,  \
+                                 ((char)(imm)&0xF0) ? 21 : (char)(imm) + 5,  \
+                                 ((char)(imm)&0xF0) ? 22 : (char)(imm) + 6,  \
+                                 ((char)(imm)&0xF0) ? 23 : (char)(imm) + 7,  \
+                                 ((char)(imm)&0xF0) ? 24 : (char)(imm) + 8,  \
+                                 ((char)(imm)&0xF0) ? 25 : (char)(imm) + 9,  \
+                                 ((char)(imm)&0xF0) ? 26 : (char)(imm) + 10, \
+                                 ((char)(imm)&0xF0) ? 27 : (char)(imm) + 11, \
+                                 ((char)(imm)&0xF0) ? 28 : (char)(imm) + 12, \
+                                 ((char)(imm)&0xF0) ? 29 : (char)(imm) + 13, \
+                                 ((char)(imm)&0xF0) ? 30 : (char)(imm) + 14, \
+                                 ((char)(imm)&0xF0) ? 31 : (char)(imm) + 15); })
 
 #define _mm_bsrli_si128(a, imm) \
   _mm_srli_si128((a), (imm))
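The right-shift counterpart of the byte shift sketched earlier (illustrative only; function name hypothetical); _mm_bsrli_si128 is simply an alias:

#include <emmintrin.h>

static __m128i example_drop_low_four_bytes(__m128i v)
{
  return _mm_srli_si128(v, 4);   /* bytes move toward the low end; the high 4 bytes become 0 */
}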
 
+/// \brief Right-shifts each 16-bit value in the 128-bit integer vector
+///    operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLW / PSRLW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    An integer value specifying the number of bits to right-shift each value
+///    in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_srli_epi16(__m128i __a, int __count)
 {
   return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
 }
 
+/// \brief Right-shifts each 16-bit value in the 128-bit integer vector
+///    operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLW / PSRLW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    A 128-bit integer vector in which bits [63:0] specify the number of bits
+///    to right-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_srl_epi16(__m128i __a, __m128i __count)
 {
   return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
 }
 
+/// \brief Right-shifts each 32-bit value in the 128-bit integer vector
+///    operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLD / PSRLD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    An integer value specifying the number of bits to right-shift each value
+///    in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_srli_epi32(__m128i __a, int __count)
 {
   return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
 }
 
+/// \brief Right-shifts each 32-bit value in the 128-bit integer vector
+///    operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLD / PSRLD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    A 128-bit integer vector in which bits [63:0] specify the number of bits
+///    to right-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_srl_epi32(__m128i __a, __m128i __count)
 {
   return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
 }
 
+/// \brief Right-shifts each 64-bit value in the 128-bit integer vector
+///    operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLQ / PSRLQ instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    An integer value specifying the number of bits to right-shift each value
+///    in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_srli_epi64(__m128i __a, int __count)
 {
-  return __builtin_ia32_psrlqi128(__a, __count);
+  return __builtin_ia32_psrlqi128((__v2di)__a, __count);
 }
 
+/// \brief Right-shifts each 64-bit value in the 128-bit integer vector
+///    operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLQ / PSRLQ instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the source operand.
+/// \param __count
+///    A 128-bit integer vector in which bits [63:0] specify the number of bits
+///    to right-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_srl_epi64(__m128i __a, __m128i __count)
 {
-  return __builtin_ia32_psrlq128(__a, __count);
+  return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
 }
 
+/// \brief Compares each of the corresponding 8-bit values of the 128-bit
+///    integer vectors for equality. Each comparison yields 0h for false, FFh
+///    for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQB / PCMPEQB instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \param __b
+///    A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cmpeq_epi8(__m128i __a, __m128i __b)
 {
   return (__m128i)((__v16qi)__a == (__v16qi)__b);
 }
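A sketch of the usual pattern for the comparison intrinsics (illustrative only; helper name hypothetical): the all-ones/all-zeros result is used as a mask with the bitwise operations defined earlier:

#include <emmintrin.h>

static __m128i example_select_equal_bytes(__m128i a, __m128i b)
{
  __m128i eq = _mm_cmpeq_epi8(a, b);   /* 0xFF where a[i] == b[i], 0x00 elsewhere */
  return _mm_and_si128(eq, a);         /* keep matching bytes of a, zero the rest */
}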
 
+/// \brief Compares each of the corresponding 16-bit values of the 128-bit
+///    integer vectors for equality. Each comparison yields 0h for false, FFFFh
+///    for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQW / PCMPEQW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \param __b
+///    A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cmpeq_epi16(__m128i __a, __m128i __b)
 {
   return (__m128i)((__v8hi)__a == (__v8hi)__b);
 }
 
+/// \brief Compares each of the corresponding 32-bit values of the 128-bit
+///    integer vectors for equality. Each comparison yields 0h for false,
+///    FFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQD / PCMPEQD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \param __b
+///    A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cmpeq_epi32(__m128i __a, __m128i __b)
 {
   return (__m128i)((__v4si)__a == (__v4si)__b);
 }
 
+/// \brief Compares each of the corresponding signed 8-bit values of the 128-bit
+///    integer vectors to determine if the values in the first operand are
+///    greater than those in the second operand. Each comparison yields 0h for
+///    false, FFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTB / PCMPGTB instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \param __b
+///    A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cmpgt_epi8(__m128i __a, __m128i __b)
 {
@@ -1004,30 +1551,100 @@
   return (__m128i)((__v16qs)__a > (__v16qs)__b);
 }
 
+/// \brief Compares each of the corresponding signed 16-bit values of the
+///    128-bit integer vectors to determine if the values in the first operand
+///    are greater than those in the second operand. Each comparison yields 0h
+///    for false, FFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTW / PCMPGTW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \param __b
+///    A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cmpgt_epi16(__m128i __a, __m128i __b)
 {
   return (__m128i)((__v8hi)__a > (__v8hi)__b);
 }
 
+/// \brief Compares each of the corresponding signed 32-bit values of the
+///    128-bit integer vectors to determine if the values in the first operand
+///    are greater than those in the second operand. Each comparison yields 0h
+///    for false, FFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTD / PCMPGTD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \param __b
+///    A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cmpgt_epi32(__m128i __a, __m128i __b)
 {
   return (__m128i)((__v4si)__a > (__v4si)__b);
 }
 
+/// \brief Compares each of the corresponding signed 8-bit values of the 128-bit
+///    integer vectors to determine if the values in the first operand are less
+///    than those in the second operand. Each comparison yields 0h for false,
+///    FFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTB / PCMPGTB instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \param __b
+///    A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cmplt_epi8(__m128i __a, __m128i __b)
 {
   return _mm_cmpgt_epi8(__b, __a);
 }
 
+/// \brief Compares each of the corresponding signed 16-bit values of the
+///    128-bit integer vectors to determine if the values in the first operand
+///    are less than those in the second operand. Each comparison yields 0h for
+///    false, FFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTW / PCMPGTW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \param __b
+///    A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cmplt_epi16(__m128i __a, __m128i __b)
 {
   return _mm_cmpgt_epi16(__b, __a);
 }
 
+/// \brief Compares each of the corresponding signed 32-bit values of the
+///    128-bit integer vectors to determine if the values in the first operand
+///    are less than those in the second operand. Each comparison yields 0h for
+///    false, FFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTD / PCMPGTD instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \param __b
+///    A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cmplt_epi32(__m128i __a, __m128i __b)
 {
@@ -1035,6 +1652,23 @@
 }
 
 #ifdef __x86_64__
+/// \brief Converts a 64-bit signed integer value from the second operand into a
+///    double-precision value and returns it in the lower element of a [2 x
+///    double] vector; the upper element of the returned vector is copied from
+///    the upper element of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSI2SD / CVTSI2SD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double]. The upper 64 bits of this operand are
+///    copied to the upper 64 bits of the destination.
+/// \param __b
+///    A 64-bit signed integer operand containing the value to be converted.
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
+///    converted value of the second operand. The upper 64 bits are copied from
+///    the upper 64 bits of the first operand.
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cvtsi64_sd(__m128d __a, long long __b)
 {
@@ -1042,12 +1676,34 @@
   return __a;
 }
 
+/// \brief Converts the first (lower) element of a vector of [2 x double] into a
+///    64-bit signed integer value, according to the current rounding mode.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSD2SI / CVTSD2SI instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
+///    conversion.
+/// \returns A 64-bit signed integer containing the converted value.
 static __inline__ long long __DEFAULT_FN_ATTRS
 _mm_cvtsd_si64(__m128d __a)
 {
-  return __builtin_ia32_cvtsd2si64(__a);
+  return __builtin_ia32_cvtsd2si64((__v2df)__a);
 }
 
+/// \brief Converts the first (lower) element of a vector of [2 x double] into a
+///    64-bit signed integer value, truncating the result when it is inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTTSD2SI / CVTTSD2SI instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
+///    conversion.
+/// \returns A 64-bit signed integer containing the converted value.
 static __inline__ long long __DEFAULT_FN_ATTRS
 _mm_cvttsd_si64(__m128d __a)
 {
@@ -1055,24 +1711,63 @@
 }
 #endif
 
+/// \brief Converts a vector of [4 x i32] into a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTDQ2PS / CVTDQ2PS instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \returns A 128-bit vector of [4 x float] containing the converted values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvtepi32_ps(__m128i __a)
 {
   return __builtin_ia32_cvtdq2ps((__v4si)__a);
 }
 
+/// \brief Converts a vector of [4 x float] into a vector of [4 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTPS2DQ / CVTPS2DQ instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit integer vector of [4 x i32] containing the converted
+///    values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvtps_epi32(__m128 __a)
 {
-  return (__m128i)__builtin_ia32_cvtps2dq(__a);
+  return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
 }
 
+/// \brief Converts a vector of [4 x float] into a vector of [4 x i32],
+///    truncating the result when it is inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTTPS2DQ / CVTTPS2DQ instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x i32] containing the converted values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvttps_epi32(__m128 __a)
 {
-  return (__m128i)__builtin_ia32_cvttps2dq(__a);
+  return (__m128i)__builtin_convertvector((__v4sf)__a, __v4si);
 }
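A short sketch contrasting the two float-to-int conversions above (illustrative only; helper name hypothetical). _mm_cvtps_epi32 follows the current MXCSR rounding mode (round-to-nearest-even by default), while _mm_cvttps_epi32 always truncates toward zero:

#include <emmintrin.h>

static void example_convert_both(__m128 f, __m128i *rounded, __m128i *truncated)
{
  *rounded   = _mm_cvtps_epi32(f);    /* 2.5f -> 2 under round-to-nearest-even */
  *truncated = _mm_cvttps_epi32(f);   /* 2.5f -> 2 and -2.5f -> -2 */
}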
 
+/// \brief Returns a vector of [4 x i32] where the lowest element is the input
+///    operand and the remaining elements are zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+///
+/// \param __a
+///    A 32-bit signed integer operand.
+/// \returns A 128-bit vector of [4 x i32].
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvtsi32_si128(int __a)
 {
@@ -1080,6 +1775,16 @@
 }
 
 #ifdef __x86_64__
+/// \brief Returns a vector of [2 x i64] where the lower element is the input
+///    operand and the upper element is zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / MOVQ instruction.
+///
+/// \param __a
+///    A 64-bit signed integer operand containing the value to be converted.
+/// \returns A 128-bit vector of [2 x i64] containing the converted value.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvtsi64_si128(long long __a)
 {
@@ -1087,6 +1792,17 @@
 }
 #endif
 
+/// \brief Moves the least significant 32 bits of a vector of [4 x i32] to a
+///    32-bit signed integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+///
+/// \param __a
+///    A vector of [4 x i32]. The least significant 32 bits are moved to the
+///    destination.
+/// \returns A 32-bit signed integer containing the moved value.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_cvtsi128_si32(__m128i __a)
 {
@@ -1095,6 +1811,17 @@
 }
 
 #ifdef __x86_64__
+/// \brief Moves the least significant 64 bits of a vector of [2 x i64] to a
+///    64-bit signed integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / MOVQ instruction.
+///
+/// \param __a
+///    A vector of [2 x i64]. The least significant 64 bits are moved to the
+///    destination.
+/// \returns A 64-bit signed integer containing the moved value.
 static __inline__ long long __DEFAULT_FN_ATTRS
 _mm_cvtsi128_si64(__m128i __a)
 {
@@ -1102,12 +1829,32 @@
 }
 #endif
 
+/// \brief Moves packed integer values from an aligned 128-bit memory location
+///    to elements in a 128-bit integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVDQA / MOVDQA instruction.
+///
+/// \param __p
+///    An aligned pointer to a memory location containing integer values.
+/// \returns A 128-bit integer vector containing the moved values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_load_si128(__m128i const *__p)
 {
   return *__p;
 }
 
+/// \brief Moves packed integer values from an unaligned 128-bit memory location
+///    to elements in a 128-bit integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVDQU / MOVDQU instruction.
+///
+/// \param __p
+///    A pointer to a memory location containing integer values.
+/// \returns A 128-bit integer vector containing the moved values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_loadu_si128(__m128i const *__p)
 {
@@ -1117,6 +1864,18 @@
   return ((struct __loadu_si128*)__p)->__v;
 }
 
+/// \brief Returns a vector of [2 x i64] where the lower element is taken from
+///    the lower element of the operand, and the upper element is zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / MOVQ instruction.
+///
+/// \param __p
+///    A pointer to a 64-bit memory location. The 64 bits at that location are
+///    written to bits [63:0] of the destination.
+/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
+///    moved value. The higher order bits are cleared.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_loadl_epi64(__m128i const *__p)
 {
@@ -1126,66 +1885,270 @@
   return (__m128i) { ((struct __mm_loadl_epi64_struct*)__p)->__u, 0};
 }
 
+/// \brief Generates a 128-bit vector of [4 x i32] with unspecified content.
+///    This could be used as an argument to another intrinsic function where the
+///    argument is required but the value is not actually used.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \returns A 128-bit vector of [4 x i32] with unspecified content.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_undefined_si128()
+_mm_undefined_si128(void)
 {
   return (__m128i)__builtin_ia32_undef128();
 }
 
+/// \brief Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
+///    the specified 64-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __q1
+///    A 64-bit integer value used to initialize the upper 64 bits of the
+///    destination vector of [2 x i64].
+/// \param __q0
+///    A 64-bit integer value used to initialize the lower 64 bits of the
+///    destination vector of [2 x i64].
+/// \returns An initialized 128-bit vector of [2 x i64] containing the values
+///    provided in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_set_epi64x(long long __q1, long long __q0)
 {
   return (__m128i){ __q0, __q1 };
 }
 
+/// \brief Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
+///    the specified 64-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __q1
+///    A 64-bit integer value used to initialize the upper 64 bits of the
+///    destination vector of [2 x i64].
+/// \param __q0
+///    A 64-bit integer value used to initialize the lower 64 bits of the
+///    destination vector of [2 x i64].
+/// \returns An initialized 128-bit vector of [2 x i64] containing the values
+///    provided in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_set_epi64(__m64 __q1, __m64 __q0)
 {
   return (__m128i){ (long long)__q0, (long long)__q1 };
 }
 
+/// \brief Initializes the 32-bit values in a 128-bit vector of [4 x i32] with
+///    the specified 32-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __i3
+///    A 32-bit integer value used to initialize bits [127:96] of the
+///    destination vector.
+/// \param __i2
+///    A 32-bit integer value used to initialize bits [95:64] of the destination
+///    vector.
+/// \param __i1
+///    A 32-bit integer value used to initialize bits [63:32] of the destination
+///    vector.
+/// \param __i0
+///    A 32-bit integer value used to initialize bits [31:0] of the destination
+///    vector.
+/// \returns An initialized 128-bit vector of [4 x i32] containing the values
+///    provided in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
 {
   return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
 }
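A sketch of the argument ordering for the set functions above (illustrative only; helper name hypothetical). Arguments are given highest element first; the _mm_setr_* variants elsewhere in this header take them lowest first:

#include <emmintrin.h>

static __m128i example_make_0_1_2_3(void)
{
  return _mm_set_epi32(3, 2, 1, 0);   /* element 0 = 0 ... element 3 = 3 */
}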
 
+/// \brief Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
+///    the specified 16-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __w7
+///    A 16-bit integer value used to initialize bits [127:112] of the
+///    destination vector.
+/// \param __w6
+///    A 16-bit integer value used to initialize bits [111:96] of the
+///    destination vector.
+/// \param __w5
+///    A 16-bit integer value used to initialize bits [95:80] of the destination
+///    vector.
+/// \param __w4
+///    A 16-bit integer value used to initialize bits [79:64] of the destination
+///    vector.
+/// \param __w3
+///    A 16-bit integer value used to initialize bits [63:48] of the destination
+///    vector.
+/// \param __w2
+///    A 16-bit integer value used to initialize bits [47:32] of the destination
+///    vector.
+/// \param __w1
+///    A 16-bit integer value used to initialize bits [31:16] of the destination
+///    vector.
+/// \param __w0
+///    A 16-bit integer value used to initialize bits [15:0] of the destination
+///    vector.
+/// \returns An initialized 128-bit vector of [8 x i16] containing the values
+///    provided in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
 {
   return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
 }
 
+/// \brief Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
+///    the specified 8-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __b15
+///    Initializes bits [127:120] of the destination vector.
+/// \param __b14
+///    Initializes bits [119:112] of the destination vector.
+/// \param __b13
+///    Initializes bits [111:104] of the destination vector.
+/// \param __b12
+///    Initializes bits [103:96] of the destination vector.
+/// \param __b11
+///    Initializes bits [95:88] of the destination vector.
+/// \param __b10
+///    Initializes bits [87:80] of the destination vector.
+/// \param __b9
+///    Initializes bits [79:72] of the destination vector.
+/// \param __b8
+///    Initializes bits [71:64] of the destination vector.
+/// \param __b7
+///    Initializes bits [63:56] of the destination vector.
+/// \param __b6
+///    Initializes bits [55:48] of the destination vector.
+/// \param __b5
+///    Initializes bits [47:40] of the destination vector.
+/// \param __b4
+///    Initializes bits [39:32] of the destination vector.
+/// \param __b3
+///    Initializes bits [31:24] of the destination vector.
+/// \param __b2
+///    Initializes bits [23:16] of the destination vector.
+/// \param __b1
+///    Initializes bits [15:8] of the destination vector.
+/// \param __b0
+///    Initializes bits [7:0] of the destination vector.
+/// \returns An initialized 128-bit vector of [16 x i8] containing the values
+///    provided in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
 {
   return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
 }
 
+/// \brief Initializes both values in a 128-bit integer vector with the
+///    specified 64-bit integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __q
+///    Integer value used to initialize the elements of the destination integer
+///    vector.
+/// \returns An initialized 128-bit integer vector of [2 x i64] with both
+///    elements containing the value provided in the operand.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_set1_epi64x(long long __q)
 {
   return (__m128i){ __q, __q };
 }
 
+/// \brief Initializes both values in a 128-bit vector of [2 x i64] with the
+///    specified 64-bit value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __q
+///    A 64-bit value used to initialize the elements of the destination integer
+///    vector.
+/// \returns An initialized 128-bit vector of [2 x i64] with all elements
+///    containing the value provided in the operand.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_set1_epi64(__m64 __q)
 {
   return (__m128i){ (long long)__q, (long long)__q };
 }
 
+/// \brief Initializes all values in a 128-bit vector of [4 x i32] with the
+///    specified 32-bit value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __i
+///    A 32-bit value used to initialize the elements of the destination integer
+///    vector.
+/// \returns An initialized 128-bit vector of [4 x i32] with all elements
+///    containing the value provided in the operand.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_set1_epi32(int __i)
 {
   return (__m128i)(__v4si){ __i, __i, __i, __i };
 }
 
+/// \brief Initializes all values in a 128-bit vector of [8 x i16] with the
+///    specified 16-bit value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __w
+///    A 16-bit value used to initialize the elements of the destination integer
+///    vector.
+/// \returns An initialized 128-bit vector of [8 x i16] with all elements
+///    containing the value provided in the operand.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_set1_epi16(short __w)
 {
   return (__m128i)(__v8hi){ __w, __w, __w, __w, __w, __w, __w, __w };
 }
 
+/// \brief Initializes all values in a 128-bit vector of [16 x i8] with the
+///    specified 8-bit value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __b
+///    An 8-bit value used to initialize the elements of the destination integer
+///    vector.
+/// \returns An initialized 128-bit vector of [16 x i8] with all elements
+///    containing the value provided in the operand.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_set1_epi8(char __b)
 {
@@ -1231,7 +2194,10 @@
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_storeu_si128(__m128i *__p, __m128i __b)
 {
-  __builtin_ia32_storedqu((char *)__p, (__v16qi)__b);
+  struct __storeu_si128 {
+    __m128i __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_si128*)__p)->__v = __b;
 }
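Caller-visible behaviour is unchanged by the switch from the storedqu builtin to a packed, may_alias struct store; a minimal unaligned-copy sketch (illustrative only; helper name hypothetical):

#include <emmintrin.h>

static void example_copy16_unaligned(const void *src, void *dst)
{
  __m128i v = _mm_loadu_si128((const __m128i *)src);
  _mm_storeu_si128((__m128i *)dst, v);
}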
 
 static __inline__ void __DEFAULT_FN_ATTRS
@@ -1252,13 +2218,13 @@
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_stream_pd(double *__p, __m128d __a)
 {
-  __builtin_ia32_movntpd(__p, __a);
+  __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
 }
 
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_stream_si128(__m128i *__p, __m128i __a)
 {
-  __builtin_ia32_movntdq(__p, __a);
+  __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
 }
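A sketch of the non-temporal store above (illustrative only; helper name hypothetical). The destination must be 16-byte aligned, and producers normally issue _mm_sfence (from xmmintrin.h) before other threads read the data:

#include <emmintrin.h>

static void example_stream_fill(__m128i *dst, __m128i v, int n)
{
  for (int i = 0; i < n; ++i)
    _mm_stream_si128(dst + i, v);   /* bypasses the cache hierarchy */
  _mm_sfence();
}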
 
 static __inline__ void __DEFAULT_FN_ATTRS
@@ -1334,25 +2300,25 @@
 
 #define _mm_shuffle_epi32(a, imm) __extension__ ({ \
   (__m128i)__builtin_shufflevector((__v4si)(__m128i)(a), \
-                                   (__v4si)_mm_setzero_si128(), \
-                                   (imm) & 0x3, ((imm) & 0xc) >> 2, \
-                                   ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); })
+                                   (__v4si)_mm_undefined_si128(), \
+                                   ((imm) >> 0) & 0x3, ((imm) >> 2) & 0x3, \
+                                   ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); })
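A sketch of the shuffle immediate reworked above (illustrative only; helper name hypothetical). The immediate packs four 2-bit source indices, and _MM_SHUFFLE from xmmintrin.h builds it with indices listed highest element first:

#include <emmintrin.h>

static __m128i example_broadcast_lane0(__m128i v)
{
  return _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0));   /* replicate element 0 to all lanes */
}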
 
 #define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
   (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
-                                   (__v8hi)_mm_setzero_si128(), \
-                                   (imm) & 0x3, ((imm) & 0xc) >> 2, \
-                                   ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+                                   (__v8hi)_mm_undefined_si128(), \
+                                   ((imm) >> 0) & 0x3, ((imm) >> 2) & 0x3, \
+                                   ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3, \
                                    4, 5, 6, 7); })
 
 #define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
   (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
-                                   (__v8hi)_mm_setzero_si128(), \
+                                   (__v8hi)_mm_undefined_si128(), \
                                    0, 1, 2, 3, \
-                                   4 + (((imm) & 0x03) >> 0), \
-                                   4 + (((imm) & 0x0c) >> 2), \
-                                   4 + (((imm) & 0x30) >> 4), \
-                                   4 + (((imm) & 0xc0) >> 6)); })
+                                   4 + (((imm) >> 0) & 0x3), \
+                                   4 + (((imm) >> 2) & 0x3), \
+                                   4 + (((imm) >> 4) & 0x3), \
+                                   4 + (((imm) >> 6) & 0x3)); })
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_unpackhi_epi8(__m128i __a, __m128i __b)
@@ -1375,7 +2341,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_unpackhi_epi64(__m128i __a, __m128i __b)
 {
-  return (__m128i)__builtin_shufflevector(__a, __b, 1, 2+1);
+  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -1399,7 +2365,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_unpacklo_epi64(__m128i __a, __m128i __b)
 {
-  return (__m128i)__builtin_shufflevector(__a, __b, 0, 2+0);
+  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
 }
 
 static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -1417,30 +2383,31 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_move_epi64(__m128i __a)
 {
-  return __builtin_shufflevector(__a, (__m128i){ 0 }, 0, 2);
+  return __builtin_shufflevector((__v2di)__a, (__m128i){ 0 }, 0, 2);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_unpackhi_pd(__m128d __a, __m128d __b)
 {
-  return __builtin_shufflevector(__a, __b, 1, 2+1);
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_unpacklo_pd(__m128d __a, __m128d __b)
 {
-  return __builtin_shufflevector(__a, __b, 0, 2+0);
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_movemask_pd(__m128d __a)
 {
-  return __builtin_ia32_movmskpd(__a);
+  return __builtin_ia32_movmskpd((__v2df)__a);
 }
 
 #define _mm_shuffle_pd(a, b, i) __extension__ ({ \
   (__m128d)__builtin_shufflevector((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
-                                   (i) & 1, (((i) & 2) >> 1) + 2); })
+                                   0 + (((i) >> 0) & 0x1), \
+                                   2 + (((i) >> 1) & 0x1)); })
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_castpd_ps(__m128d __a)
diff --git a/renderscript/clang-include/f16cintrin.h b/renderscript/clang-include/f16cintrin.h
index c655d98..415bf73 100644
--- a/renderscript/clang-include/f16cintrin.h
+++ b/renderscript/clang-include/f16cintrin.h
@@ -29,11 +29,90 @@
 #define __F16CINTRIN_H
 
 /* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
 
-#define _mm_cvtps_ph(a, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)); })
+/// \brief Converts a 16-bit half-precision float value into a 32-bit float
+///    value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTPH2PS instruction.
+///
+/// \param __a
+///    A 16-bit half-precision float value.
+/// \returns The converted 32-bit float value.
+static __inline float __DEFAULT_FN_ATTRS
+_cvtsh_ss(unsigned short __a)
+{
+  __v8hi v = {(short)__a, 0, 0, 0, 0, 0, 0, 0};
+  __v4sf r = __builtin_ia32_vcvtph2ps(v);
+  return r[0];
+}
 
+/// \brief Converts a 32-bit single-precision float value to a 16-bit
+///    half-precision float value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned short _cvtss_sh(float a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTPS2PH instruction.
+///
+/// \param a
+///    A 32-bit single-precision float value to be converted to a 16-bit
+///    half-precision float value.
+/// \param imm
+///    An immediate value controlling rounding using bits [2:0]:
+///    000: Nearest
+///    001: Down
+///    010: Up
+///    011: Truncate
+///    1XX: Use MXCSR.RC for rounding
+/// \returns The converted 16-bit half-precision float value.
+#define _cvtss_sh(a, imm)  \
+  ((unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
+                                                      (imm)))[0]))
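A scalar round-trip sketch for the two conversions above (illustrative only; helper name hypothetical, and it assumes the code is built with F16C support, e.g. -mf16c). The second argument of _cvtss_sh selects the rounding mode; 0 is round-to-nearest:

#include <x86intrin.h>

static float example_round_trip_through_half(float x)
{
  unsigned short h = _cvtss_sh(x, 0);
  return _cvtsh_ss(h);
}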
+
+/// \brief Converts a 128-bit vector containing 32-bit float values into a
+///    128-bit vector containing 16-bit half-precision float values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_cvtps_ph(__m128 a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTPS2PH instruction.
+///
+/// \param a
+///    A 128-bit vector containing 32-bit float values.
+/// \param imm
+///    An immediate value controlling rounding using bits [2:0]:
+///    000: Nearest
+///    001: Down
+///    010: Up
+///    011: Truncate
+///    1XX: Use MXCSR.RC for rounding
+/// \returns A 128-bit vector containing converted 16-bit half-precision float
+///    values. The lower 64 bits are used to store the converted 16-bit
+///    half-precision floating-point values.
+#define _mm_cvtps_ph(a, imm) \
+  ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)))
+
+/// \brief Converts a 128-bit vector containing 16-bit half-precision float
+///    values into a 128-bit vector containing 32-bit float values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTPH2PS instruction.
+///
+/// \param __a
+///    A 128-bit vector containing 16-bit half-precision float values. The lower
+///    64 bits are used in the conversion.
+/// \returns A 128-bit vector of [4 x float] containing converted float values.
 static __inline __m128 __DEFAULT_FN_ATTRS
 _mm_cvtph_ps(__m128i __a)
 {
diff --git a/renderscript/clang-include/float.h b/renderscript/clang-include/float.h
index 238cf76..a28269e 100644
--- a/renderscript/clang-include/float.h
+++ b/renderscript/clang-include/float.h
@@ -39,7 +39,9 @@
 #  undef FLT_MANT_DIG
 #  undef DBL_MANT_DIG
 #  undef LDBL_MANT_DIG
-#  undef DECIMAL_DIG
+#  if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__)
+#    undef DECIMAL_DIG
+#  endif
 #  undef FLT_DIG
 #  undef DBL_DIG
 #  undef LDBL_DIG
@@ -68,6 +70,9 @@
 #    undef FLT_TRUE_MIN
 #    undef DBL_TRUE_MIN
 #    undef LDBL_TRUE_MIN
+#    undef FLT_DECIMAL_DIG
+#    undef DBL_DECIMAL_DIG
+#    undef LDBL_DECIMAL_DIG
 #  endif
 #endif
 
@@ -81,7 +86,9 @@
 #define DBL_MANT_DIG __DBL_MANT_DIG__
 #define LDBL_MANT_DIG __LDBL_MANT_DIG__
 
-#define DECIMAL_DIG __DECIMAL_DIG__
+#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__)
+#  define DECIMAL_DIG __DECIMAL_DIG__
+#endif
 
 #define FLT_DIG __FLT_DIG__
 #define DBL_DIG __DBL_DIG__
@@ -119,6 +126,9 @@
 #  define FLT_TRUE_MIN __FLT_DENORM_MIN__
 #  define DBL_TRUE_MIN __DBL_DENORM_MIN__
 #  define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#  define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
+#  define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
+#  define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
 #endif
 
 #endif /* __FLOAT_H */
diff --git a/renderscript/clang-include/fma4intrin.h b/renderscript/clang-include/fma4intrin.h
index f117887..11aa8ce 100644
--- a/renderscript/clang-include/fma4intrin.h
+++ b/renderscript/clang-include/fma4intrin.h
@@ -36,193 +36,193 @@
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
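A sketch of the fused multiply-add above (illustrative only; helper name hypothetical, and it assumes an FMA4-capable target, e.g. built with -mfma4):

#include <x86intrin.h>

static __m128 example_axpy4(__m128 a, __m128 x, __m128 y)
{
  return _mm_macc_ps(a, x, y);   /* per lane: a*x + y, with a single rounding */
}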
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfnmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfnmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfnmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfnmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfnmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfnmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfnmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfnmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmsubaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmsubaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfnmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfnmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfnmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfnmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfmsubaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfmsubaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 #undef __DEFAULT_FN_ATTRS
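(Illustrative note, not part of the patch.) The hunks above only add explicit casts to the builtins' element-typed vectors (__v4sf, __v2df, __v8sf, __v4df); the user-visible multiply-add intrinsics behave exactly as before. A minimal sketch of the FMA4-style _mm_macc_ps, assuming an FMA4-capable target (e.g. built with -mfma4):

    /* Each lane computes a*b + c; illustrative only. */
    #include <stdio.h>
    #include <x86intrin.h>

    int main(void)
    {
      __m128 a = _mm_set1_ps(2.0f);
      __m128 b = _mm_set1_ps(3.0f);
      __m128 c = _mm_set1_ps(1.0f);
      __m128 r = _mm_macc_ps(a, b, c);   /* 2*3 + 1 = 7 in every lane */
      float out[4];
      _mm_storeu_ps(out, r);
      printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
      return 0;
    }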
diff --git a/renderscript/clang-include/fmaintrin.h b/renderscript/clang-include/fmaintrin.h
index 114a143..0e2ef0b 100644
--- a/renderscript/clang-include/fmaintrin.h
+++ b/renderscript/clang-include/fmaintrin.h
@@ -34,193 +34,193 @@
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfnmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfnmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfnmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfnmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfnmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfnmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfnmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfnmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+  return (__m128)__builtin_ia32_vfmsubaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+  return (__m128d)__builtin_ia32_vfmsubaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfnmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfnmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfnmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfnmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS
 _mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+  return (__m256)__builtin_ia32_vfmsubaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+  return (__m256d)__builtin_ia32_vfmsubaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
 }
 
 #undef __DEFAULT_FN_ATTRS
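(Illustrative note, not part of the patch.) fmaintrin.h gets the same cast-only treatment. A minimal FMA3 sketch using _mm256_fmadd_ps, assuming an FMA-capable target (e.g. -mfma or -march=haswell):

    /* Each lane computes a*b + c; illustrative only. */
    #include <stdio.h>
    #include <immintrin.h>

    int main(void)
    {
      __m256 a = _mm256_set1_ps(2.0f);
      __m256 b = _mm256_set1_ps(3.0f);
      __m256 c = _mm256_set1_ps(1.0f);
      __m256 r = _mm256_fmadd_ps(a, b, c);  /* 2*3 + 1 = 7 in every lane */
      float out[8];
      _mm256_storeu_ps(out, r);
      printf("%g\n", out[0]);
      return 0;
    }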
diff --git a/renderscript/clang-include/htmintrin.h b/renderscript/clang-include/htmintrin.h
index 0088c7c..69c8d7b 100644
--- a/renderscript/clang-include/htmintrin.h
+++ b/renderscript/clang-include/htmintrin.h
@@ -164,24 +164,24 @@
 /* Helper intrinsics to retry tbegin in case of transient failure.  */
 
 static __inline int __attribute__((__always_inline__, __nodebug__))
-__builtin_tbegin_retry_null (int retry)
+__builtin_tbegin_retry_null (int __retry)
 {
   int cc, i = 0;
 
   while ((cc = __builtin_tbegin(0)) == _HTM_TBEGIN_TRANSIENT
-         && i++ < retry)
+         && i++ < __retry)
     __builtin_tx_assist(i);
 
   return cc;
 }
 
 static __inline int __attribute__((__always_inline__, __nodebug__))
-__builtin_tbegin_retry_tdb (void *tdb, int retry)
+__builtin_tbegin_retry_tdb (void *__tdb, int __retry)
 {
   int cc, i = 0;
 
-  while ((cc = __builtin_tbegin(tdb)) == _HTM_TBEGIN_TRANSIENT
-         && i++ < retry)
+  while ((cc = __builtin_tbegin(__tdb)) == _HTM_TBEGIN_TRANSIENT
+         && i++ < __retry)
     __builtin_tx_assist(i);
 
   return cc;
@@ -193,24 +193,24 @@
    __builtin_tbegin_retry_tdb(tdb, retry))
 
 static __inline int __attribute__((__always_inline__, __nodebug__))
-__builtin_tbegin_retry_nofloat_null (int retry)
+__builtin_tbegin_retry_nofloat_null (int __retry)
 {
   int cc, i = 0;
 
   while ((cc = __builtin_tbegin_nofloat(0)) == _HTM_TBEGIN_TRANSIENT
-         && i++ < retry)
+         && i++ < __retry)
     __builtin_tx_assist(i);
 
   return cc;
 }
 
 static __inline int __attribute__((__always_inline__, __nodebug__))
-__builtin_tbegin_retry_nofloat_tdb (void *tdb, int retry)
+__builtin_tbegin_retry_nofloat_tdb (void *__tdb, int __retry)
 {
   int cc, i = 0;
 
-  while ((cc = __builtin_tbegin_nofloat(tdb)) == _HTM_TBEGIN_TRANSIENT
-         && i++ < retry)
+  while ((cc = __builtin_tbegin_nofloat(__tdb)) == _HTM_TBEGIN_TRANSIENT
+         && i++ < __retry)
     __builtin_tx_assist(i);
 
   return cc;
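(Illustrative note, not part of the patch.) The renamed parameters above only move the retry helpers into the implementation namespace; the retry behaviour is unchanged. A sketch of how the SystemZ retry helper is typically used, assuming an s390x target built with -mhtm (the __builtin_tbegin_retry macro, __builtin_tend, and _HTM_TBEGIN_STARTED all come from this header/target):

    #include <htmintrin.h>

    static long counter;

    void bump(void)
    {
      /* Retry the transaction up to 5 times on transient failures. */
      if (__builtin_tbegin_retry(0, 5) == _HTM_TBEGIN_STARTED) {
        counter++;                          /* transactional update */
        __builtin_tend();
      } else {
        __sync_fetch_and_add(&counter, 1);  /* non-transactional fallback */
      }
    }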
diff --git a/renderscript/clang-include/htmxlintrin.h b/renderscript/clang-include/htmxlintrin.h
index c7571ec..16dc705 100644
--- a/renderscript/clang-include/htmxlintrin.h
+++ b/renderscript/clang-include/htmxlintrin.h
@@ -62,18 +62,18 @@
 
 extern __inline long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_begin (void* const TM_buff)
+__TM_begin (void* const __TM_buff)
 {
-  *_TEXASRL_PTR (TM_buff) = 0;
+  *_TEXASRL_PTR (__TM_buff) = 0;
   if (__builtin_expect (__builtin_tbegin (0), 1))
     return _HTM_TBEGIN_STARTED;
 #ifdef __powerpc64__
-  *_TEXASR_PTR (TM_buff) = __builtin_get_texasr ();
+  *_TEXASR_PTR (__TM_buff) = __builtin_get_texasr ();
 #else
-  *_TEXASRU_PTR (TM_buff) = __builtin_get_texasru ();
-  *_TEXASRL_PTR (TM_buff) = __builtin_get_texasr ();
+  *_TEXASRU_PTR (__TM_buff) = __builtin_get_texasru ();
+  *_TEXASRL_PTR (__TM_buff) = __builtin_get_texasr ();
 #endif
-  *_TFIAR_PTR (TM_buff) = __builtin_get_tfiar ();
+  *_TFIAR_PTR (__TM_buff) = __builtin_get_tfiar ();
   return 0;
 }
 
@@ -95,9 +95,9 @@
 
 extern __inline void
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_named_abort (unsigned char const code)
+__TM_named_abort (unsigned char const __code)
 {
-  __builtin_tabort (code);
+  __builtin_tabort (__code);
 }
 
 extern __inline void
@@ -116,47 +116,47 @@
 
 extern __inline long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_is_user_abort (void* const TM_buff)
+__TM_is_user_abort (void* const __TM_buff)
 {
-  texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
   return _TEXASRU_ABORT (texasru);
 }
 
 extern __inline long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_is_named_user_abort (void* const TM_buff, unsigned char *code)
+__TM_is_named_user_abort (void* const __TM_buff, unsigned char *__code)
 {
-  texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
 
-  *code = _TEXASRU_FAILURE_CODE (texasru);
+  *__code = _TEXASRU_FAILURE_CODE (texasru);
   return _TEXASRU_ABORT (texasru);
 }
 
 extern __inline long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_is_illegal (void* const TM_buff)
+__TM_is_illegal (void* const __TM_buff)
 {
-  texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
   return _TEXASRU_DISALLOWED (texasru);
 }
 
 extern __inline long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_is_footprint_exceeded (void* const TM_buff)
+__TM_is_footprint_exceeded (void* const __TM_buff)
 {
-  texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
   return _TEXASRU_FOOTPRINT_OVERFLOW (texasru);
 }
 
 extern __inline long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_nesting_depth (void* const TM_buff)
+__TM_nesting_depth (void* const __TM_buff)
 {
   texasrl_t texasrl;
 
   if (_HTM_STATE (__builtin_ttest ()) == _HTM_NONTRANSACTIONAL)
     {
-      texasrl = *_TEXASRL_PTR (TM_buff);
+      texasrl = *_TEXASRL_PTR (__TM_buff);
       if (!_TEXASR_FAILURE_SUMMARY (texasrl))
         texasrl = 0;
     }
@@ -168,15 +168,15 @@
 
 extern __inline long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_is_nested_too_deep(void* const TM_buff)
+__TM_is_nested_too_deep(void* const __TM_buff)
 {
-  texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
   return _TEXASRU_NESTING_OVERFLOW (texasru);
 }
 
 extern __inline long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_is_conflict(void* const TM_buff)
+__TM_is_conflict(void* const __TM_buff)
 {
-  texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
   /* Return TEXASR bits 11 (Self-Induced Conflict) through
@@ -186,24 +186,24 @@
 
 extern __inline long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_is_failure_persistent(void* const TM_buff)
+__TM_is_failure_persistent(void* const __TM_buff)
 {
-  texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
   return _TEXASRU_FAILURE_PERSISTENT (texasru);
 }
 
 extern __inline long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_failure_address(void* const TM_buff)
+__TM_failure_address(void* const __TM_buff)
 {
-  return *_TFIAR_PTR (TM_buff);
+  return *_TFIAR_PTR (__TM_buff);
 }
 
 extern __inline long long
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-__TM_failure_code(void* const TM_buff)
+__TM_failure_code(void* const __TM_buff)
 {
-  return *_TEXASR_PTR (TM_buff);
+  return *_TEXASR_PTR (__TM_buff);
 }
 
 #ifdef __cplusplus
@@ -227,9 +227,9 @@
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_begin (void* const tdb)
+__TM_begin (void* const __tdb)
 {
-  return __builtin_tbegin_nofloat (tdb);
+  return __builtin_tbegin_nofloat (__tdb);
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
@@ -245,22 +245,22 @@
 }
 
 static __inline void __attribute__((__always_inline__, __nodebug__))
-__TM_named_abort (unsigned char const code)
+__TM_named_abort (unsigned char const __code)
 {
-  return __builtin_tabort ((int)_HTM_FIRST_USER_ABORT_CODE + code);
+  return __builtin_tabort ((int)_HTM_FIRST_USER_ABORT_CODE + __code);
 }
 
 static __inline void __attribute__((__always_inline__, __nodebug__))
-__TM_non_transactional_store (void* const addr, long long const value)
+__TM_non_transactional_store (void* const __addr, long long const __value)
 {
-  __builtin_non_tx_store ((uint64_t*)addr, (uint64_t)value);
+  __builtin_non_tx_store ((uint64_t*)__addr, (uint64_t)__value);
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_nesting_depth (void* const tdb_ptr)
+__TM_nesting_depth (void* const __tdb_ptr)
 {
   int depth = __builtin_tx_nesting_depth ();
-  struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+  struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
 
   if (depth != 0)
     return depth;
@@ -273,9 +273,9 @@
 /* Transaction failure diagnostics */
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_is_user_abort (void* const tdb_ptr)
+__TM_is_user_abort (void* const __tdb_ptr)
 {
-  struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+  struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
 
   if (tdb->format != 1)
     return 0;
@@ -284,25 +284,25 @@
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_is_named_user_abort (void* const tdb_ptr, unsigned char* code)
+__TM_is_named_user_abort (void* const __tdb_ptr, unsigned char* __code)
 {
-  struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+  struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
 
   if (tdb->format != 1)
     return 0;
 
   if (tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE)
     {
-      *code = tdb->abort_code - _HTM_FIRST_USER_ABORT_CODE;
+      *__code = tdb->abort_code - _HTM_FIRST_USER_ABORT_CODE;
       return 1;
     }
   return 0;
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_is_illegal (void* const tdb_ptr)
+__TM_is_illegal (void* const __tdb_ptr)
 {
-  struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+  struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
 
   return (tdb->format == 1
 	  && (tdb->abort_code == 4 /* unfiltered program interruption */
@@ -310,9 +310,9 @@
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_is_footprint_exceeded (void* const tdb_ptr)
+__TM_is_footprint_exceeded (void* const __tdb_ptr)
 {
-  struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+  struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
 
   return (tdb->format == 1
 	  && (tdb->abort_code == 7 /* fetch overflow */
@@ -320,17 +320,17 @@
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_is_nested_too_deep (void* const tdb_ptr)
+__TM_is_nested_too_deep (void* const __tdb_ptr)
 {
-  struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+  struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
 
   return tdb->format == 1 && tdb->abort_code == 13; /* depth exceeded */
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_is_conflict (void* const tdb_ptr)
+__TM_is_conflict (void* const __tdb_ptr)
 {
-  struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+  struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
 
   return (tdb->format == 1
 	  && (tdb->abort_code == 9 /* fetch conflict */
@@ -338,22 +338,22 @@
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_is_failure_persistent (long const result)
+__TM_is_failure_persistent (long const __result)
 {
-  return result == _HTM_TBEGIN_PERSISTENT;
+  return __result == _HTM_TBEGIN_PERSISTENT;
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_failure_address (void* const tdb_ptr)
+__TM_failure_address (void* const __tdb_ptr)
 {
-  struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+  struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
   return tdb->atia;
 }
 
 static __inline long __attribute__((__always_inline__, __nodebug__))
-__TM_failure_code (void* const tdb_ptr)
+__TM_failure_code (void* const __tdb_ptr)
 {
-  struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+  struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
 
   return tdb->abort_code;
 }
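(Illustrative note, not part of the patch.) A sketch of the XL-compatible API whose parameters are renamed above, in its PowerPC flavour; this assumes a POWER target built with -mhtm and relies on this header providing TM_buff_type, __TM_end, and _HTM_TBEGIN_STARTED:

    #include <htmxlintrin.h>

    long long guarded_increment(long *p)
    {
      TM_buff_type buff;
      if (__TM_begin(&buff) == _HTM_TBEGIN_STARTED) {
        (*p)++;                        /* transactional update */
        __TM_end();
        return 0;
      }
      /* Begin failed; report the failure code recorded in buff. */
      return __TM_failure_code(&buff);
    }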
diff --git a/renderscript/clang-include/ia32intrin.h b/renderscript/clang-include/ia32intrin.h
index 5adf3f1..397f3fd 100644
--- a/renderscript/clang-include/ia32intrin.h
+++ b/renderscript/clang-include/ia32intrin.h
@@ -32,50 +32,26 @@
 static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
 __readeflags(void)
 {
-  unsigned long long __res = 0;
-  __asm__ __volatile__ ("pushf\n\t"
-                        "popq %0\n"
-                        :"=r"(__res)
-                        :
-                        :
-                       );
-  return __res;
+  return __builtin_ia32_readeflags_u64();
 }
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__))
 __writeeflags(unsigned long long __f)
 {
-  __asm__ __volatile__ ("pushq %0\n\t"
-                        "popf\n"
-                        :
-                        :"r"(__f)
-                        :"flags"
-                       );
+  __builtin_ia32_writeeflags_u64(__f);
 }
 
 #else /* !__x86_64__ */
 static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
 __readeflags(void)
 {
-  unsigned int __res = 0;
-  __asm__ __volatile__ ("pushf\n\t"
-                        "popl %0\n"
-                        :"=r"(__res)
-                        :
-                        :
-                       );
-  return __res;
+  return __builtin_ia32_readeflags_u32();
 }
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__))
 __writeeflags(unsigned int __f)
 {
-  __asm__ __volatile__ ("pushl %0\n\t"
-                        "popf\n"
-                        :
-                        :"r"(__f)
-                        :"flags"
-                       );
+  __builtin_ia32_writeeflags_u32(__f);
 }
 #endif /* !__x86_64__ */
 
@@ -98,4 +74,6 @@
 
 #define _rdtsc() __rdtsc()
 
+#define _rdpmc(A) __rdpmc(A)
+
 #endif /* __IA32INTRIN_H */
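(Illustrative note, not part of the patch.) Replacing the hand-written pushf/popf asm with the readeflags/writeeflags builtins does not change the caller-visible API, and the new _rdpmc macro simply mirrors the existing _rdtsc one. A minimal sketch, x86-64 assumed:

    #include <stdio.h>
    #include <x86intrin.h>

    int main(void)
    {
      unsigned long long flags = __readeflags();   /* same call as before */
      unsigned long long t0 = _rdtsc();
      /* ... code to be timed ... */
      unsigned long long t1 = _rdtsc();
      printf("eflags=%#llx, elapsed cycles=%llu\n", flags, t1 - t0);
      return 0;
    }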
diff --git a/renderscript/clang-include/immintrin.h b/renderscript/clang-include/immintrin.h
index f3c6d19..4b27523 100644
--- a/renderscript/clang-include/immintrin.h
+++ b/renderscript/clang-include/immintrin.h
@@ -24,22 +24,45 @@
 #ifndef __IMMINTRIN_H
 #define __IMMINTRIN_H
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MMX__)
 #include <mmintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE__)
 #include <xmmintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE2__)
 #include <emmintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE3__)
 #include <pmmintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSSE3__)
 #include <tmmintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+    (defined(__SSE4_2__) || defined(__SSE4_1__))
 #include <smmintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+    (defined(__AES__) || defined(__PCLMUL__))
 #include <wmmintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLFLUSHOPT__)
+#include <clflushoptintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX__)
 #include <avxintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX2__)
 #include <avx2intrin.h>
 
 /* The 256-bit versions of functions in f16cintrin.h.
@@ -54,31 +77,90 @@
 {
   return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
 }
+#endif /* __AVX2__ */
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
 #include <bmiintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
 #include <bmi2intrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
 #include <lzcntintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA__)
 #include <fmaintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512F__)
 #include <avx512fintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VL__)
 #include <avx512vlintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BW__)
 #include <avx512bwintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512CD__)
 #include <avx512cdintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512DQ__)
 #include <avx512dqintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512BW__))
 #include <avx512vlbwintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512CD__))
+#include <avx512vlcdintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512DQ__))
 #include <avx512vldqintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512ER__)
 #include <avx512erintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512IFMA__)
+#include <avx512ifmaintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+    (defined(__AVX512IFMA__) && defined(__AVX512VL__))
+#include <avx512ifmavlintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI__)
+#include <avx512vbmiintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+    (defined(__AVX512VBMI__) && defined(__AVX512VL__))
+#include <avx512vbmivlintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512PF__)
+#include <avx512pfintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PKU__)
+#include <pkuintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDRND__)
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
 _rdrand16_step(unsigned short *__p)
 {
@@ -91,6 +173,18 @@
   return __builtin_ia32_rdrand32_step(__p);
 }
 
+/* __bit_scan_forward */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_bit_scan_forward(int __A) {
+  return __builtin_ctz(__A);
+}
+
+/* __bit_scan_reverse */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_bit_scan_reverse(int __A) {
+  return 31 - __builtin_clz(__A);
+}
+
 #ifdef __x86_64__
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
 _rdrand64_step(unsigned long long *__p)
@@ -98,7 +192,9 @@
   return __builtin_ia32_rdrand64_step(__p);
 }
 #endif
+#endif /* __RDRND__ */
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FSGSBASE__)
 #ifdef __x86_64__
 static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
 _readfsbase_u32(void)
@@ -147,23 +243,38 @@
 {
   return __builtin_ia32_wrgsbase64(__V);
 }
+
+#endif
+#endif /* __FSGSBASE__ */
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RTM__)
+#include <rtmintrin.h>
+#include <xtestintrin.h>
 #endif
 
-#include <rtmintrin.h>
-
-#include <xtestintrin.h>
-
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHA__)
 #include <shaintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FXSR__)
 #include <fxsrintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVE__)
 #include <xsaveintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEOPT__)
 #include <xsaveoptintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEC__)
 #include <xsavecintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVES__)
 #include <xsavesintrin.h>
+#endif
 
 /* Some intrinsics inside adxintrin.h are available only on processors with ADX,
  * whereas others are also available at all times. */
diff --git a/renderscript/clang-include/intrin.h b/renderscript/clang-include/intrin.h
new file mode 100644
index 0000000..f18711a
--- /dev/null
+++ b/renderscript/clang-include/intrin.h
@@ -0,0 +1,957 @@
+/* ===-------- intrin.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the windows platform. */
+#ifndef _MSC_VER
+#include_next <intrin.h>
+#else
+
+#ifndef __INTRIN_H
+#define __INTRIN_H
+
+/* First include the standard intrinsics. */
+#if defined(__i386__) || defined(__x86_64__)
+#include <x86intrin.h>
+#endif
+
+/* For the definition of jmp_buf. */
+#if __STDC_HOSTED__
+#include <setjmp.h>
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__MMX__)
+/* And the random ones that aren't in those files. */
+__m64 _m_from_float(float);
+float _m_to_float(__m64);
+#endif
+
+/* Other assorted instruction intrinsics. */
+void __addfsbyte(unsigned long, unsigned char);
+void __addfsdword(unsigned long, unsigned long);
+void __addfsword(unsigned long, unsigned short);
+void __code_seg(const char *);
+static __inline__
+void __cpuid(int[4], int);
+static __inline__
+void __cpuidex(int[4], int, int);
+void __debugbreak(void);
+__int64 __emul(int, int);
+unsigned __int64 __emulu(unsigned int, unsigned int);
+void __cdecl __fastfail(unsigned int);
+unsigned int __getcallerseflags(void);
+static __inline__
+void __halt(void);
+unsigned char __inbyte(unsigned short);
+void __inbytestring(unsigned short, unsigned char *, unsigned long);
+void __incfsbyte(unsigned long);
+void __incfsdword(unsigned long);
+void __incfsword(unsigned long);
+unsigned long __indword(unsigned short);
+void __indwordstring(unsigned short, unsigned long *, unsigned long);
+void __int2c(void);
+void __invlpg(void *);
+unsigned short __inword(unsigned short);
+void __inwordstring(unsigned short, unsigned short *, unsigned long);
+void __lidt(void *);
+unsigned __int64 __ll_lshift(unsigned __int64, int);
+__int64 __ll_rshift(__int64, int);
+void __llwpcb(void *);
+unsigned char __lwpins32(unsigned int, unsigned int, unsigned int);
+void __lwpval32(unsigned int, unsigned int, unsigned int);
+unsigned int __lzcnt(unsigned int);
+unsigned short __lzcnt16(unsigned short);
+static __inline__
+void __movsb(unsigned char *, unsigned char const *, size_t);
+static __inline__
+void __movsd(unsigned long *, unsigned long const *, size_t);
+static __inline__
+void __movsw(unsigned short *, unsigned short const *, size_t);
+void __nop(void);
+void __nvreg_restore_fence(void);
+void __nvreg_save_fence(void);
+void __outbyte(unsigned short, unsigned char);
+void __outbytestring(unsigned short, unsigned char *, unsigned long);
+void __outdword(unsigned short, unsigned long);
+void __outdwordstring(unsigned short, unsigned long *, unsigned long);
+void __outword(unsigned short, unsigned short);
+void __outwordstring(unsigned short, unsigned short *, unsigned long);
+static __inline__
+unsigned int __popcnt(unsigned int);
+static __inline__
+unsigned short __popcnt16(unsigned short);
+unsigned long __readcr0(void);
+unsigned long __readcr2(void);
+static __inline__
+unsigned long __readcr3(void);
+unsigned long __readcr4(void);
+unsigned long __readcr8(void);
+unsigned int __readdr(unsigned int);
+#ifdef __i386__
+static __inline__
+unsigned char __readfsbyte(unsigned long);
+static __inline__
+unsigned long __readfsdword(unsigned long);
+static __inline__
+unsigned __int64 __readfsqword(unsigned long);
+static __inline__
+unsigned short __readfsword(unsigned long);
+#endif
+static __inline__
+unsigned __int64 __readmsr(unsigned long);
+unsigned __int64 __readpmc(unsigned long);
+unsigned long __segmentlimit(unsigned long);
+void __sidt(void *);
+void *__slwpcb(void);
+static __inline__
+void __stosb(unsigned char *, unsigned char, size_t);
+static __inline__
+void __stosd(unsigned long *, unsigned long, size_t);
+static __inline__
+void __stosw(unsigned short *, unsigned short, size_t);
+void __svm_clgi(void);
+void __svm_invlpga(void *, int);
+void __svm_skinit(int);
+void __svm_stgi(void);
+void __svm_vmload(size_t);
+void __svm_vmrun(size_t);
+void __svm_vmsave(size_t);
+void __ud2(void);
+unsigned __int64 __ull_rshift(unsigned __int64, int);
+void __vmx_off(void);
+void __vmx_vmptrst(unsigned __int64 *);
+void __wbinvd(void);
+void __writecr0(unsigned int);
+static __inline__
+void __writecr3(unsigned int);
+void __writecr4(unsigned int);
+void __writecr8(unsigned int);
+void __writedr(unsigned int, unsigned int);
+void __writefsbyte(unsigned long, unsigned char);
+void __writefsdword(unsigned long, unsigned long);
+void __writefsqword(unsigned long, unsigned __int64);
+void __writefsword(unsigned long, unsigned short);
+void __writemsr(unsigned long, unsigned __int64);
+static __inline__
+void *_AddressOfReturnAddress(void);
+static __inline__
+unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
+static __inline__
+unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
+static __inline__
+unsigned char _bittest(long const *, long);
+static __inline__
+unsigned char _bittestandcomplement(long *, long);
+static __inline__
+unsigned char _bittestandreset(long *, long);
+static __inline__
+unsigned char _bittestandset(long *, long);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
+unsigned long __cdecl _byteswap_ulong(unsigned long);
+unsigned short __cdecl _byteswap_ushort(unsigned short);
+void __cdecl _disable(void);
+void __cdecl _enable(void);
+long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);
+static __inline__
+long _InterlockedAnd(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedAnd16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedAnd8(char volatile *_Value, char _Mask);
+unsigned char _interlockedbittestandreset(long volatile *, long);
+static __inline__
+unsigned char _interlockedbittestandset(long volatile *, long);
+static __inline__
+long __cdecl _InterlockedCompareExchange(long volatile *_Destination,
+                                         long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_HLEAcquire(long volatile *, long, long);
+long _InterlockedCompareExchange_HLERelease(long volatile *, long, long);
+static __inline__
+short _InterlockedCompareExchange16(short volatile *_Destination,
+                                    short _Exchange, short _Comparand);
+static __inline__
+__int64 _InterlockedCompareExchange64(__int64 volatile *_Destination,
+                                      __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
+                                                 __int64);
+__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
+                                                 __int64);
+static __inline__
+char _InterlockedCompareExchange8(char volatile *_Destination, char _Exchange,
+                                  char _Comparand);
+void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,
+                                                    void *);
+void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,
+                                                    void *);
+static __inline__
+long __cdecl _InterlockedDecrement(long volatile *_Addend);
+static __inline__
+short _InterlockedDecrement16(short volatile *_Addend);
+long _InterlockedExchange(long volatile *_Target, long _Value);
+static __inline__
+short _InterlockedExchange16(short volatile *_Target, short _Value);
+static __inline__
+char _InterlockedExchange8(char volatile *_Target, char _Value);
+static __inline__
+long __cdecl _InterlockedExchangeAdd(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
+long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
+static __inline__
+short _InterlockedExchangeAdd16(short volatile *_Addend, short _Value);
+__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
+__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
+static __inline__
+char _InterlockedExchangeAdd8(char volatile *_Addend, char _Value);
+static __inline__
+long __cdecl _InterlockedIncrement(long volatile *_Addend);
+static __inline__
+short _InterlockedIncrement16(short volatile *_Addend);
+static __inline__
+long _InterlockedOr(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedOr16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedOr8(char volatile *_Value, char _Mask);
+static __inline__
+long _InterlockedXor(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedXor16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedXor8(char volatile *_Value, char _Mask);
+void __cdecl _invpcid(unsigned int, void *);
+static __inline__
+unsigned long __cdecl _lrotl(unsigned long, int);
+static __inline__
+unsigned long __cdecl _lrotr(unsigned long, int);
+static __inline__
+void _ReadBarrier(void);
+static __inline__
+void _ReadWriteBarrier(void);
+static __inline__
+void *_ReturnAddress(void);
+unsigned int _rorx_u32(unsigned int, const unsigned int);
+static __inline__
+unsigned int __cdecl _rotl(unsigned int _Value, int _Shift);
+static __inline__
+unsigned short _rotl16(unsigned short _Value, unsigned char _Shift);
+static __inline__
+unsigned __int64 __cdecl _rotl64(unsigned __int64 _Value, int _Shift);
+static __inline__
+unsigned char _rotl8(unsigned char _Value, unsigned char _Shift);
+static __inline__
+unsigned int __cdecl _rotr(unsigned int _Value, int _Shift);
+static __inline__
+unsigned short _rotr16(unsigned short _Value, unsigned char _Shift);
+static __inline__
+unsigned __int64 __cdecl _rotr64(unsigned __int64 _Value, int _Shift);
+static __inline__
+unsigned char _rotr8(unsigned char _Value, unsigned char _Shift);
+int _sarx_i32(int, unsigned int);
+#if __STDC_HOSTED__
+int __cdecl _setjmp(jmp_buf);
+#endif
+unsigned int _shlx_u32(unsigned int, unsigned int);
+unsigned int _shrx_u32(unsigned int, unsigned int);
+void _Store_HLERelease(long volatile *, long);
+void _Store64_HLERelease(__int64 volatile *, __int64);
+void _StorePointer_HLERelease(void *volatile *, void *);
+static __inline__
+void _WriteBarrier(void);
+unsigned __int32 xbegin(void);
+void _xend(void);
+static __inline__
+#define _XCR_XFEATURE_ENABLED_MASK 0
+unsigned __int64 __cdecl _xgetbv(unsigned int);
+void __cdecl _xsetbv(unsigned int, unsigned __int64);
+
+/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
+#ifdef __x86_64__
+void __addgsbyte(unsigned long, unsigned char);
+void __addgsdword(unsigned long, unsigned long);
+void __addgsqword(unsigned long, unsigned __int64);
+void __addgsword(unsigned long, unsigned short);
+static __inline__
+void __faststorefence(void);
+void __incgsbyte(unsigned long);
+void __incgsdword(unsigned long);
+void __incgsqword(unsigned long);
+void __incgsword(unsigned long);
+unsigned char __lwpins64(unsigned __int64, unsigned int, unsigned int);
+void __lwpval64(unsigned __int64, unsigned int, unsigned int);
+unsigned __int64 __lzcnt64(unsigned __int64);
+static __inline__
+void __movsq(unsigned long long *, unsigned long long const *, size_t);
+__int64 __mulh(__int64, __int64);
+static __inline__
+unsigned __int64 __popcnt64(unsigned __int64);
+static __inline__
+unsigned char __readgsbyte(unsigned long);
+static __inline__
+unsigned long __readgsdword(unsigned long);
+static __inline__
+unsigned __int64 __readgsqword(unsigned long);
+unsigned short __readgsword(unsigned long);
+unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
+                                unsigned __int64 _HighPart,
+                                unsigned char _Shift);
+unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
+                                 unsigned __int64 _HighPart,
+                                 unsigned char _Shift);
+static __inline__
+void __stosq(unsigned __int64 *, unsigned __int64, size_t);
+unsigned char __vmx_on(unsigned __int64 *);
+unsigned char __vmx_vmclear(unsigned __int64 *);
+unsigned char __vmx_vmlaunch(void);
+unsigned char __vmx_vmptrld(unsigned __int64 *);
+unsigned char __vmx_vmread(size_t, size_t *);
+unsigned char __vmx_vmresume(void);
+unsigned char __vmx_vmwrite(size_t, size_t);
+void __writegsbyte(unsigned long, unsigned char);
+void __writegsdword(unsigned long, unsigned long);
+void __writegsqword(unsigned long, unsigned __int64);
+void __writegsword(unsigned long, unsigned short);
+static __inline__
+unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _bittest64(__int64 const *, __int64);
+static __inline__
+unsigned char _bittestandcomplement64(__int64 *, __int64);
+static __inline__
+unsigned char _bittestandreset64(__int64 *, __int64);
+static __inline__
+unsigned char _bittestandset64(__int64 *, __int64);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
+long _InterlockedAnd_np(long volatile *_Value, long _Mask);
+short _InterlockedAnd16_np(short volatile *_Value, short _Mask);
+__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
+unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
+static __inline__
+unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
+long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
+                                    long _Comparand);
+unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
+                                             __int64 _ExchangeHigh,
+                                             __int64 _ExchangeLow,
+                                             __int64 *_CompareandResult);
+unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
+                                                __int64 _ExchangeHigh,
+                                                __int64 _ExchangeLow,
+                                                __int64 *_ComparandResult);
+short _InterlockedCompareExchange16_np(short volatile *_Destination,
+                                       short _Exchange, short _Comparand);
+__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
+                                                 __int64);
+__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
+                                                 __int64);
+__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
+                                         __int64 _Exchange, __int64 _Comparand);
+void *_InterlockedCompareExchangePointer(void *volatile *_Destination,
+                                         void *_Exchange, void *_Comparand);
+void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
+                                            void *_Exchange, void *_Comparand);
+static __inline__
+__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
+void *_InterlockedExchangePointer(void *volatile *_Target, void *_Value);
+static __inline__
+__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
+long _InterlockedOr_np(long volatile *_Value, long _Mask);
+short _InterlockedOr16_np(short volatile *_Value, short _Mask);
+static __inline__
+__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedOr8_np(char volatile *_Value, char _Mask);
+long _InterlockedXor_np(long volatile *_Value, long _Mask);
+short _InterlockedXor16_np(short volatile *_Value, short _Mask);
+static __inline__
+__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedXor8_np(char volatile *_Value, char _Mask);
+static __inline__
+__int64 _mul128(__int64 _Multiplier, __int64 _Multiplicand,
+                __int64 *_HighProduct);
+unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
+__int64 _sarx_i64(__int64, unsigned int);
+#if __STDC_HOSTED__
+int __cdecl _setjmpex(jmp_buf);
+#endif
+unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
+unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
+/*
+ * Multiply two 64-bit integers and obtain a 64-bit result.
+ * The low-half is returned directly and the high half is in an out parameter.
+ */
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_umul128(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand,
+         unsigned __int64 *_HighProduct) {
+  unsigned __int128 _FullProduct =
+      (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
+  *_HighProduct = _FullProduct >> 64;
+  return _FullProduct;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__umulh(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand) {
+  unsigned __int128 _FullProduct =
+      (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
+  return _FullProduct >> 64;
+}
+
+#endif /* __x86_64__ */
+
+/*----------------------------------------------------------------------------*\
+|* Multiplication
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+__emul(int __in1, int __in2) {
+  return (__int64)__in1 * (__int64)__in2;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__emulu(unsigned int __in1, unsigned int __in2) {
+  return (unsigned __int64)__in1 * (unsigned __int64)__in2;
+}
+/*----------------------------------------------------------------------------*\
+|* Bit Twiddling
+\*----------------------------------------------------------------------------*/
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_rotl8(unsigned char _Value, unsigned char _Shift) {
+  _Shift &= 0x7;
+  return _Shift ? (_Value << _Shift) | (_Value >> (8 - _Shift)) : _Value;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_rotr8(unsigned char _Value, unsigned char _Shift) {
+  _Shift &= 0x7;
+  return _Shift ? (_Value >> _Shift) | (_Value << (8 - _Shift)) : _Value;
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+_rotl16(unsigned short _Value, unsigned char _Shift) {
+  _Shift &= 0xf;
+  return _Shift ? (_Value << _Shift) | (_Value >> (16 - _Shift)) : _Value;
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+_rotr16(unsigned short _Value, unsigned char _Shift) {
+  _Shift &= 0xf;
+  return _Shift ? (_Value >> _Shift) | (_Value << (16 - _Shift)) : _Value;
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rotl(unsigned int _Value, int _Shift) {
+  _Shift &= 0x1f;
+  return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rotr(unsigned int _Value, int _Shift) {
+  _Shift &= 0x1f;
+  return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+_lrotl(unsigned long _Value, int _Shift) {
+  _Shift &= 0x1f;
+  return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+_lrotr(unsigned long _Value, int _Shift) {
+  _Shift &= 0x1f;
+  return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
+}
+static
+__inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_rotl64(unsigned __int64 _Value, int _Shift) {
+  _Shift &= 0x3f;
+  return _Shift ? (_Value << _Shift) | (_Value >> (64 - _Shift)) : _Value;
+}
+static
+__inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_rotr64(unsigned __int64 _Value, int _Shift) {
+  _Shift &= 0x3f;
+  return _Shift ? (_Value >> _Shift) | (_Value << (64 - _Shift)) : _Value;
+}
+/*----------------------------------------------------------------------------*\
+|* Bit Counting and Testing
+\*----------------------------------------------------------------------------*/
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanForward(unsigned long *_Index, unsigned long _Mask) {
+  if (!_Mask)
+    return 0;
+  *_Index = __builtin_ctzl(_Mask);
+  return 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanReverse(unsigned long *_Index, unsigned long _Mask) {
+  if (!_Mask)
+    return 0;
+  *_Index = 31 - __builtin_clzl(_Mask);
+  return 1;
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__popcnt16(unsigned short _Value) {
+  return __builtin_popcount((int)_Value);
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__popcnt(unsigned int _Value) {
+  return __builtin_popcount(_Value);
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittest(long const *_BitBase, long _BitPos) {
+  return (*_BitBase >> _BitPos) & 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandcomplement(long *_BitBase, long _BitPos) {
+  unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+  *_BitBase = *_BitBase ^ (1 << _BitPos);
+  return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandreset(long *_BitBase, long _BitPos) {
+  unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+  *_BitBase = *_BitBase & ~(1 << _BitPos);
+  return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandset(long *_BitBase, long _BitPos) {
+  unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+  *_BitBase = *_BitBase | (1 << _BitPos);
+  return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_interlockedbittestandset(long volatile *_BitBase, long _BitPos) {
+  long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_SEQ_CST);
+  return (_PrevVal >> _BitPos) & 1;
+}
+#ifdef __x86_64__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) {
+  if (!_Mask)
+    return 0;
+  *_Index = __builtin_ctzll(_Mask);
+  return 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask) {
+  if (!_Mask)
+    return 0;
+  *_Index = 63 - __builtin_clzll(_Mask);
+  return 1;
+}
+static __inline__
+unsigned __int64 __DEFAULT_FN_ATTRS
+__popcnt64(unsigned __int64 _Value) {
+  return __builtin_popcountll(_Value);
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittest64(__int64 const *_BitBase, __int64 _BitPos) {
+  return (*_BitBase >> _BitPos) & 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandcomplement64(__int64 *_BitBase, __int64 _BitPos) {
+  unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+  *_BitBase = *_BitBase ^ (1ll << _BitPos);
+  return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandreset64(__int64 *_BitBase, __int64 _BitPos) {
+  unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+  *_BitBase = *_BitBase & ~(1ll << _BitPos);
+  return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandset64(__int64 *_BitBase, __int64 _BitPos) {
+  unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+  *_BitBase = *_BitBase | (1ll << _BitPos);
+  return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_interlockedbittestandset64(__int64 volatile *_BitBase, __int64 _BitPos) {
+  long long _PrevVal =
+      __atomic_fetch_or(_BitBase, 1ll << _BitPos, __ATOMIC_SEQ_CST);
+  return (_PrevVal >> _BitPos) & 1;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Add
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd8(char volatile *_Addend, char _Value) {
+  return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
+  return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
+  return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Sub
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub8(char volatile *_Subend, char _Value) {
+  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub16(short volatile *_Subend, short _Value) {
+  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub(long volatile *_Subend, long _Value) {
+  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
+  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Increment
+\*----------------------------------------------------------------------------*/
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedIncrement16(short volatile *_Value) {
+  return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedIncrement64(__int64 volatile *_Value) {
+  return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Decrement
+\*----------------------------------------------------------------------------*/
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedDecrement16(short volatile *_Value) {
+  return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedDecrement64(__int64 volatile *_Value) {
+  return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked And
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedAnd8(char volatile *_Value, char _Mask) {
+  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedAnd16(short volatile *_Value, short _Mask) {
+  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedAnd(long volatile *_Value, long _Mask) {
+  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
+  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Or
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedOr8(char volatile *_Value, char _Mask) {
+  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedOr16(short volatile *_Value, short _Mask) {
+  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedOr(long volatile *_Value, long _Mask) {
+  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
+  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Xor
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedXor8(char volatile *_Value, char _Mask) {
+  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedXor16(short volatile *_Value, short _Mask) {
+  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedXor(long volatile *_Value, long _Mask) {
+  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
+  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchange8(char volatile *_Target, char _Value) {
+  __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+  return _Value;
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchange16(short volatile *_Target, short _Value) {
+  __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+  return _Value;
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
+  __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+  return _Value;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Compare Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange8(char volatile *_Destination,
+                             char _Exchange, char _Comparand) {
+  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+  return _Comparand;
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange16(short volatile *_Destination,
+                              short _Exchange, short _Comparand) {
+  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+  return _Comparand;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64(__int64 volatile *_Destination,
+                              __int64 _Exchange, __int64 _Comparand) {
+  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+  return _Comparand;
+}
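
A sketch of the usual compare-exchange retry loop built on the intrinsics above (illustrative only; the shared variable is hypothetical):

    static short volatile g_counter;

    static short increment_counter(void) {
      short old_val, new_val;
      do {
        old_val = g_counter;
        new_val = (short)(old_val + 1);
        /* The intrinsic returns the previous value; retry if another
           thread updated g_counter between the read and the exchange. */
      } while (_InterlockedCompareExchange16(&g_counter, new_val, old_val) != old_val);
      return new_val;
    }
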
+/*----------------------------------------------------------------------------*\
+|* Barriers
+\*----------------------------------------------------------------------------*/
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_ReadWriteBarrier(void) {
+  __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_ReadBarrier(void) {
+  __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_WriteBarrier(void) {
+  __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+__faststorefence(void) {
+  __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* readfs, readgs
+|* (Pointers in address space #256 and #257 are relative to the GS and FS
+|* segment registers, respectively.)
+\*----------------------------------------------------------------------------*/
+#define __ptr_to_addr_space(__addr_space_nbr, __type, __offset)              \
+    ((volatile __type __attribute__((__address_space__(__addr_space_nbr)))*) \
+    (__offset))
+
+#ifdef __i386__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+__readfsbyte(unsigned long __offset) {
+  return *__ptr_to_addr_space(257, unsigned char, __offset);
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__readfsword(unsigned long __offset) {
+  return *__ptr_to_addr_space(257, unsigned short, __offset);
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readfsqword(unsigned long __offset) {
+  return *__ptr_to_addr_space(257, unsigned __int64, __offset);
+}
+#endif
+#ifdef __x86_64__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+__readgsbyte(unsigned long __offset) {
+  return *__ptr_to_addr_space(256, unsigned char, __offset);
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__readgsword(unsigned long __offset) {
+  return *__ptr_to_addr_space(256, unsigned short, __offset);
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+__readgsdword(unsigned long __offset) {
+  return *__ptr_to_addr_space(256, unsigned long, __offset);
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readgsqword(unsigned long __offset) {
+  return *__ptr_to_addr_space(256, unsigned __int64, __offset);
+}
+#endif
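
As an aside (not part of the header): on x64 Windows the GS segment points at the thread environment block, so code commonly reads the TEB self-pointer with something like

    void *teb = (void *)__readgsqword(0x30);   /* TEB->Self on x64 Windows */

which is the usual way NtCurrentTeb() is implemented.
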
+#undef __ptr_to_addr_space
+/*----------------------------------------------------------------------------*\
+|* movs, stos
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
+  __asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n)
+                        : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
+  __asm__("rep movsl" : : "D"(__dst), "S"(__src), "c"(__n)
+                        : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
+  __asm__("rep movsw" : : "D"(__dst), "S"(__src), "c"(__n)
+                        : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosb(unsigned char *__dst, unsigned char __x, size_t __n) {
+  __asm__("rep stosb" : : "D"(__dst), "a"(__x), "c"(__n)
+                        : "%edi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
+  __asm__("rep stosl" : : "D"(__dst), "a"(__x), "c"(__n)
+                        : "%edi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
+  __asm__("rep stosw" : : "D"(__dst), "a"(__x), "c"(__n)
+                        : "%edi", "%ecx");
+}
+#endif
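
A small sketch of the string intrinsics above: __stosb behaves like a byte-wise memset and __movsb like a forward memcpy (buffer names are illustrative).

    unsigned char src[16], dst[16];
    __stosb(src, 0xAA, sizeof(src));   /* fill src with 0xAA */
    __movsb(dst, src, sizeof(src));    /* copy src into dst  */
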
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
+  __asm__("rep movsq" : : "D"(__dst), "S"(__src), "c"(__n)
+                        : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
+  __asm__("rep stosq" : : "D"(__dst), "a"(__x), "c"(__n)
+                        : "%edi", "%ecx");
+}
+#endif
+
+/*----------------------------------------------------------------------------*\
+|* Misc
+\*----------------------------------------------------------------------------*/
+static __inline__ void * __DEFAULT_FN_ATTRS
+_AddressOfReturnAddress(void) {
+  return (void*)((char*)__builtin_frame_address(0) + sizeof(void*));
+}
+static __inline__ void * __DEFAULT_FN_ATTRS
+_ReturnAddress(void) {
+  return __builtin_return_address(0);
+}
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void __DEFAULT_FN_ATTRS
+__cpuid(int __info[4], int __level) {
+  __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
+                   : "a"(__level));
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__cpuidex(int __info[4], int __level, int __ecx) {
+  __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
+                   : "a"(__level), "c"(__ecx));
+}
+static __inline__ unsigned __int64 __cdecl __DEFAULT_FN_ATTRS
+_xgetbv(unsigned int __xcr_no) {
+  unsigned int __eax, __edx;
+  __asm__ ("xgetbv" : "=a" (__eax), "=d" (__edx) : "c" (__xcr_no));
+  return ((unsigned __int64)__edx << 32) | __eax;
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__halt(void) {
+  __asm__ volatile ("hlt");
+}
+#endif
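
A hedged example of querying the CPU vendor string with __cpuid; leaf 0 returns the twelve vendor bytes in EBX, EDX, ECX, in that order (function name is hypothetical).

    #include <string.h>

    static void read_cpu_vendor(char vendor[13]) {
      int info[4];
      __cpuid(info, 0);
      memcpy(vendor + 0, &info[1], 4);   /* EBX */
      memcpy(vendor + 4, &info[3], 4);   /* EDX */
      memcpy(vendor + 8, &info[2], 4);   /* ECX */
      vendor[12] = '\0';
    }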
+
+/*----------------------------------------------------------------------------*\
+|* Privileged intrinsics
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readmsr(unsigned long __register) {
+  // Loads the contents of a 64-bit model specific register (MSR) specified in
+  // the ECX register into registers EDX:EAX. The EDX register is loaded with
+  // the high-order 32 bits of the MSR and the EAX register is loaded with the
+  // low-order 32 bits. If less than 64 bits are implemented in the MSR being
+  // read, the values returned to EDX:EAX in unimplemented bit locations are
+  // undefined.
+  unsigned long __edx;
+  unsigned long __eax;
+  __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
+  return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
+}
+
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+__readcr3(void) {
+  unsigned long __cr3_val;
+  __asm__ __volatile__ ("mov %%cr3, %0" : "=q"(__cr3_val) : : "memory");
+  return __cr3_val;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+__writecr3(unsigned int __cr3_val) {
+  __asm__ ("mov %0, %%cr3" : : "q"(__cr3_val) : "memory");
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __INTRIN_H */
+#endif /* _MSC_VER */
diff --git a/renderscript/clang-include/inttypes.h b/renderscript/clang-include/inttypes.h
index 3d59d14..1d8eaba 100644
--- a/renderscript/clang-include/inttypes.h
+++ b/renderscript/clang-include/inttypes.h
@@ -23,6 +23,10 @@
 #ifndef __CLANG_INTTYPES_H
 #define __CLANG_INTTYPES_H
 
+#if defined(_MSC_VER) && _MSC_VER < 1800
+#error MSVC does not have inttypes.h prior to Visual Studio 2013
+#endif
+
 #include_next <inttypes.h>
 
 #if defined(_MSC_VER) && _MSC_VER < 1900
diff --git a/renderscript/clang-include/mm3dnow.h b/renderscript/clang-include/mm3dnow.h
index cb93faf..294866c 100644
--- a/renderscript/clang-include/mm3dnow.h
+++ b/renderscript/clang-include/mm3dnow.h
@@ -33,7 +33,7 @@
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow")))
 
 static __inline__ void __DEFAULT_FN_ATTRS
-_m_femms() {
+_m_femms(void) {
   __builtin_ia32_femms();
 }
 
diff --git a/renderscript/clang-include/mmintrin.h b/renderscript/clang-include/mmintrin.h
index 162cb1a..cefd605 100644
--- a/renderscript/clang-include/mmintrin.h
+++ b/renderscript/clang-include/mmintrin.h
@@ -26,6 +26,7 @@
 
 typedef long long __m64 __attribute__((__vector_size__(8)));
 
+typedef long long __v1di __attribute__((__vector_size__(8)));
 typedef int __v2si __attribute__((__vector_size__(8)));
 typedef short __v4hi __attribute__((__vector_size__(8)));
 typedef char __v8qi __attribute__((__vector_size__(8)));
@@ -33,366 +34,1314 @@
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx")))
 
+/// \brief Clears the MMX state by setting the state of the x87 stack registers
+///    to empty.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c EMMS instruction.
+///
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_empty(void)
 {
     __builtin_ia32_emms();
 }
 
+/// \brief Constructs a 64-bit integer vector, setting the lower 32 bits to the
+///    value of the 32-bit integer parameter and setting the upper 32 bits to 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+///
+/// \param __i
+///    A 32-bit integer value.
+/// \returns A 64-bit integer vector. The lower 32 bits contain the value of the
+///    parameter. The upper 32 bits are set to 0.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cvtsi32_si64(int __i)
 {
     return (__m64)__builtin_ia32_vec_init_v2si(__i, 0);
 }
 
+/// \brief Returns the lower 32 bits of a 64-bit integer vector as a 32-bit
+///    signed integer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+///
+/// \param __m
+///    A 64-bit integer vector.
+/// \returns A 32-bit signed integer value containing the lower 32 bits of the
+///    parameter.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_cvtsi64_si32(__m64 __m)
 {
     return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0);
 }
 
+/// \brief Casts a 64-bit signed integer value into a 64-bit integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / MOVD instruction.
+///
+/// \param __i
+///    A 64-bit signed integer.
+/// \returns A 64-bit integer vector containing the same bitwise pattern as the
+///    parameter.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cvtsi64_m64(long long __i)
 {
     return (__m64)__i;
 }
 
+/// \brief Casts a 64-bit integer vector into a 64-bit signed integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / MOVD instruction.
+///
+/// \param __m
+///    A 64-bit integer vector.
+/// \returns A 64-bit signed integer containing the same bitwise pattern as the
+///    parameter.
 static __inline__ long long __DEFAULT_FN_ATTRS
 _mm_cvtm64_si64(__m64 __m)
 {
     return (long long)__m;
 }
 
+/// \brief Converts 16-bit signed integers from both 64-bit integer vector
+///    parameters of [4 x i16] into 8-bit signed integer values, and constructs
+///    a 64-bit integer vector of [8 x i8] as the result. Positive values
+///    greater than 0x7F are saturated to 0x7F. Negative values less than 0x80
+///    are saturated to 0x80.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PACKSSWB instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
+///    16-bit signed integer and is converted to an 8-bit signed integer with
+///    saturation. Positive values greater than 0x7F are saturated to 0x7F.
+///    Negative values less than 0x80 are saturated to 0x80. The converted
+///    [4 x i8] values are written to the lower 32 bits of the result.
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
+///    16-bit signed integer and is converted to an 8-bit signed integer with
+///    saturation. Positive values greater than 0x7F are saturated to 0x7F.
+///    Negative values less than 0x80 are saturated to 0x80. The converted
+///    [4 x i8] values are written to the upper 32 bits of the result.
+/// \returns A 64-bit integer vector of [8 x i8] containing the converted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_packs_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
 }
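
An illustrative sketch of the signed saturation performed by _mm_packs_pi16 (the _mm_set_pi16 helper used here is defined further down in this header):

    __m64 lo = _mm_set_pi16(4, 3, 2, 1);        /* lanes, low to high: 1, 2, 3, 4       */
    __m64 hi = _mm_set_pi16(-200, 300, -5, 5);  /* lanes, low to high: 5, -5, 300, -200 */
    __m64 p  = _mm_packs_pi16(lo, hi);
    /* p, low byte to high byte: 1, 2, 3, 4, 5, -5, 127, -128
       (300 saturates to 0x7F, -200 saturates to 0x80) */
    _mm_empty();   /* reset the MMX/x87 state when done with MMX code */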
 
+/// \brief Converts 32-bit signed integers from both 64-bit integer vector
+///    parameters of [2 x i32] into 16-bit signed integer values, and constructs
+///    a 64-bit integer vector of [4 x i16] as the result. Positive values
+///    greater than 0x7FFF are saturated to 0x7FFF. Negative values less than
+///    0x8000 are saturated to 0x8000.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PACKSSDW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a
+///    32-bit signed integer and is converted to a 16-bit signed integer with
+///    saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF.
+///    Negative values less than 0x8000 are saturated to 0x8000. The converted
+///    [2 x i16] values are written to the lower 32 bits of the result.
+/// \param __m2
+///    A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a
+///    32-bit signed integer and is converted to a 16-bit signed integer with
+///    saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF.
+///    Negative values less than 0x8000 are saturated to 0x8000. The converted
+///    [2 x i16] values are written to the upper 32 bits of the result.
+/// \returns A 64-bit integer vector of [4 x i16] containing the converted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_packs_pi32(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
 }
 
+/// \brief Converts 16-bit signed integers from both 64-bit integer vector
+///    parameters of [4 x i16] into 8-bit unsigned integer values, and
+///    constructs a 64-bit integer vector of [8 x i8] as the result. Values
+///    greater than 0xFF are saturated to 0xFF. Values less than 0 are saturated
+///    to 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PACKUSWB instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
+///    16-bit signed integer and is converted to an 8-bit unsigned integer with
+///    saturation. Values greater than 0xFF are saturated to 0xFF. Values less
+///    than 0 are saturated to 0. The converted [4 x i8] values are written to
+///    the lower 32 bits of the result.
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
+///    16-bit signed integer and is converted to an 8-bit unsigned integer with
+///    saturation. Values greater than 0xFF are saturated to 0xFF. Values less
+///    than 0 are saturated to 0. The converted [4 x i8] values are written to
+///    the upper 32 bits of the result.
+/// \returns A 64-bit integer vector of [8 x i8] containing the converted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_packs_pu16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of [8 x i8]
+///    and interleaves them into a 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKHBW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [8 x i8].
+///    Bits [39:32] are written to bits [7:0] of the result.
+///    Bits [47:40] are written to bits [23:16] of the result.
+///    Bits [55:48] are written to bits [39:32] of the result.
+///    Bits [63:56] are written to bits [55:48] of the result.
+/// \param __m2
+///    A 64-bit integer vector of [8 x i8].
+///    Bits [39:32] are written to bits [15:8] of the result.
+///    Bits [47:40] are written to bits [31:24] of the result.
+///    Bits [55:48] are written to bits [47:40] of the result.
+///    Bits [63:56] are written to bits [63:56] of the result.
+/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2);
 }
 
+/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of
+///    [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKHWD instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16].
+///    Bits [47:32] are written to bits [15:0] of the result.
+///    Bits [63:48] are written to bits [47:32] of the result.
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16].
+///    Bits [47:32] are written to bits [31:16] of the result.
+///    Bits [63:48] are written to bits [63:48] of the result.
+/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of
+///    [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKHDQ instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to
+///    the lower 32 bits of the result.
+/// \param __m2
+///    A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to
+///    the upper 32 bits of the result.
+/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2);
 }
 
+/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of [8 x i8]
+///    and interleaves them into a 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKLBW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [8 x i8].
+///    Bits [7:0] are written to bits [7:0] of the result.
+///    Bits [15:8] are written to bits [23:16] of the result.
+///    Bits [23:16] are written to bits [39:32] of the result.
+///    Bits [31:24] are written to bits [55:48] of the result.
+/// \param __m2
+///    A 64-bit integer vector of [8 x i8].
+///    Bits [7:0] are written to bits [15:8] of the result.
+///    Bits [15:8] are written to bits [31:24] of the result.
+///    Bits [23:16] are written to bits [47:40] of the result.
+///    Bits [31:24] are written to bits [63:56] of the result.
+/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2);
 }
 
+/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of
+///    [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKLWD instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16].
+///    Bits [15:0] are written to bits [15:0] of the result.
+///    Bits [31:16] are written to bits [47:32] of the result.
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16].
+///    Bits [15:0] are written to bits [31:16] of the result.
+///    Bits [31:16] are written to bits [63:48] of the result.
+/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of
+///    [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKLDQ instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to
+///    the lower 32 bits of the result.
+/// \param __m2
+///    A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to
+///    the upper 32 bits of the result.
+/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2);
 }
 
+/// \brief Adds each 8-bit integer element of the first 64-bit integer vector
+///    of [8 x i8] to the corresponding 8-bit integer element of the second
+///    64-bit integer vector of [8 x i8]. The lower 8 bits of the results are
+///    packed into a 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDB instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [8 x i8].
+/// \param __m2
+///    A 64-bit integer vector of [8 x i8].
+/// \returns A 64-bit integer vector of [8 x i8] containing the sums of both
+///    parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_add_pi8(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2);
 }
 
+/// \brief Adds each 16-bit integer element of the first 64-bit integer vector
+///    of [4 x i16] to the corresponding 16-bit integer element of the second
+///    64-bit integer vector of [4 x i16]. The lower 16 bits of the results are
+///    packed into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16].
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the sums of both
+///    parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_add_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Adds each 32-bit integer element of the first 64-bit integer vector
+///    of [2 x i32] to the corresponding 32-bit integer element of the second
+///    64-bit integer vector of [2 x i32]. The lower 32 bits of the results are
+///    packed into a 64-bit integer vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDD instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [2 x i32].
+/// \param __m2
+///    A 64-bit integer vector of [2 x i32].
+/// \returns A 64-bit integer vector of [2 x i32] containing the sums of both
+///    parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_add_pi32(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);
 }
 
+/// \brief Adds each 8-bit signed integer element of the first 64-bit integer
+///    vector of [8 x i8] to the corresponding 8-bit signed integer element of
+///    the second 64-bit integer vector of [8 x i8]. Positive sums greater than
+///    0x7F are saturated to 0x7F. Negative sums less than 0x80 are saturated to
+///    0x80. The results are packed into a 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDSB instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [8 x i8].
+/// \param __m2
+///    A 64-bit integer vector of [8 x i8].
+/// \returns A 64-bit integer vector of [8 x i8] containing the saturated sums
+///    of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_adds_pi8(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
 }
 
+/// \brief Adds each 16-bit signed integer element of the first 64-bit integer
+///    vector of [4 x i16] to the corresponding 16-bit signed integer element of
+///    the second 64-bit integer vector of [4 x i16]. Positive sums greater than
+///    0x7FFF are saturated to 0x7FFF. Negative sums less than 0x8000 are
+///    saturated to 0x8000. The results are packed into a 64-bit integer vector
+///    of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDSW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16].
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the saturated sums
+///    of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_adds_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Adds each 8-bit unsigned integer element of the first 64-bit integer
+///    vector of [8 x i8] to the corresponding 8-bit unsigned integer element of
+///    the second 64-bit integer vector of [8 x i8]. Sums greater than 0xFF are
+///    saturated to 0xFF. The results are packed into a 64-bit integer vector of
+///    [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDUSB instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [8 x i8].
+/// \param __m2
+///    A 64-bit integer vector of [8 x i8].
+/// \returns A 64-bit integer vector of [8 x i8] containing the saturated
+///    unsigned sums of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_adds_pu8(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
 }
 
+/// \brief Adds each 16-bit unsigned integer element of the first 64-bit integer
+///    vector of [4 x i16] to the corresponding 16-bit unsigned integer element
+///    of the second 64-bit integer vector of [4 x i16]. Sums greater than
+///    0xFFFF are saturated to 0xFFFF. The results are packed into a 64-bit
+///    integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDUSW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16].
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the saturated
+///    unsigned sums of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_adds_pu16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);
 }
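
A short sketch contrasting the wrapping and saturating byte adds above (_mm_set1_pi8 is defined further down in this header; lanes shown as unsigned values):

    __m64 a = _mm_set1_pi8((char)200);
    __m64 b = _mm_set1_pi8(100);
    __m64 wrapped   = _mm_add_pi8(a, b);   /* each lane: (200 + 100) & 0xFF == 44 */
    __m64 saturated = _mm_adds_pu8(a, b);  /* each lane clamps to 0xFF            */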
 
+/// \brief Subtracts each 8-bit integer element of the second 64-bit integer
+///    vector of [8 x i8] from the corresponding 8-bit integer element of the
+///    first 64-bit integer vector of [8 x i8]. The lower 8 bits of the results
+///    are packed into a 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBB instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [8 x i8] containing the minuends.
+/// \param __m2
+///    A 64-bit integer vector of [8 x i8] containing the subtrahends.
+/// \returns A 64-bit integer vector of [8 x i8] containing the differences of
+///    both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sub_pi8(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2);
 }
 
+/// \brief Subtracts each 16-bit integer element of the second 64-bit integer
+///    vector of [4 x i16] from the corresponding 16-bit integer element of the
+///    first 64-bit integer vector of [4 x i16]. The lower 16 bits of the
+///    results are packed into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16] containing the minuends.
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16] containing the subtrahends.
+/// \returns A 64-bit integer vector of [4 x i16] containing the differences of
+///    both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sub_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Subtracts each 32-bit integer element of the second 64-bit integer
+///    vector of [2 x i32] from the corresponding 32-bit integer element of the
+///    first 64-bit integer vector of [2 x i32]. The lower 32 bits of the
+///    results are packed into a 64-bit integer vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBD instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [2 x i32] containing the minuends.
+/// \param __m2
+///    A 64-bit integer vector of [2 x i32] containing the subtrahends.
+/// \returns A 64-bit integer vector of [2 x i32] containing the differences of
+///    both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sub_pi32(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);
 }
 
+/// \brief Subtracts each 8-bit signed integer element of the second 64-bit
+///    integer vector of [8 x i8] from the corresponding 8-bit signed integer
+///    element of the first 64-bit integer vector of [8 x i8]. Positive results
+///    greater than 0x7F are saturated to 0x7F. Negative results less than 0x80
+///    are saturated to 0x80. The results are packed into a 64-bit integer
+///    vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBSB instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [8 x i8] containing the minuends.
+/// \param __m2
+///    A 64-bit integer vector of [8 x i8] containing the subtrahends.
+/// \returns A 64-bit integer vector of [8 x i8] containing the saturated
+///    differences of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_subs_pi8(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
 }
 
+/// \brief Subtracts each 16-bit signed integer element of the second 64-bit
+///    integer vector of [4 x i16] from the corresponding 16-bit signed integer
+///    element of the first 64-bit integer vector of [4 x i16]. Positive results
+///    greater than 0x7FFF are saturated to 0x7FFF. Negative results less than
+///    0x8000 are saturated to 0x8000. The results are packed into a 64-bit
+///    integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBSW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16] containing the minuends.
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16] containing the subtrahends.
+/// \returns A 64-bit integer vector of [4 x i16] containing the saturated
+///    differences of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_subs_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Subtracts each 8-bit unsigned integer element of the second 64-bit
+///    integer vector of [8 x i8] from the corresponding 8-bit unsigned integer
+///    element of the first 64-bit integer vector of [8 x i8]. If an element of
+///    the first vector is less than the corresponding element of the second
+///    vector, the result is saturated to 0. The results are packed into a
+///    64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBUSB instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [8 x i8] containing the minuends.
+/// \param __m2
+///    A 64-bit integer vector of [8 x i8] containing the subtrahends.
+/// \returns A 64-bit integer vector of [8 x i8] containing the saturated
+///    differences of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_subs_pu8(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);
 }
 
+/// \brief Subtracts each 16-bit unsigned integer element of the second 64-bit
+///    integer vector of [4 x i16] from the corresponding 16-bit unsigned
+///    integer element of the first 64-bit integer vector of [4 x i16]. If an
+///    element of the first vector is less than the corresponding element of the
+///    second vector, the result is saturated to 0. The results are packed into
+///    a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBUSW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16] containing the minuends.
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16] containing the subtrahends.
+/// \returns A 64-bit integer vector of [4 x i16] containing the saturated
+///    differences of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_subs_pu16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Multiplies each 16-bit signed integer element of the first 64-bit
+///    integer vector of [4 x i16] by the corresponding 16-bit signed integer
+///    element of the second 64-bit integer vector of [4 x i16] and gets four
+///    32-bit products. Adds adjacent pairs of products to get two 32-bit sums.
+///    The lower 32 bits of these two sums are packed into a 64-bit integer
+///    vector of [2 x i32]. For example, bits [15:0] of both parameters are
+///    multiplied, bits [31:16] of both parameters are multiplied, and the sum
+///    of both results is written to bits [31:0] of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMADDWD instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16].
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [2 x i32] containing the sums of
+///    products of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_madd_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2);
 }
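
A sketch of the classic use of _mm_madd_pi16, a four-element 16-bit dot product (the set and extract helpers come from elsewhere in this header):

    __m64 x = _mm_set_pi16(4, 3, 2, 1);   /* lanes, low to high: 1, 2, 3, 4 */
    __m64 y = _mm_set_pi16(8, 7, 6, 5);   /* lanes, low to high: 5, 6, 7, 8 */
    __m64 pairs = _mm_madd_pi16(x, y);    /* [1*5 + 2*6, 3*7 + 4*8] == [17, 53] */
    int dot = _mm_cvtsi64_si32(pairs) +
              _mm_cvtsi64_si32(_mm_srli_si64(pairs, 32));   /* 17 + 53 == 70 */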
 
+/// \brief Multiplies each 16-bit signed integer element of the first 64-bit
+///    integer vector of [4 x i16] by the corresponding 16-bit signed integer
+///    element of the second 64-bit integer vector of [4 x i16]. Packs the upper
+///    16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMULHW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16].
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the upper 16 bits
+///    of the products of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_mulhi_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Multiplies each 16-bit signed integer element of the first 64-bit
+///    integer vector of [4 x i16] by the corresponding 16-bit signed integer
+///    element of the second 64-bit integer vector of [4 x i16]. Packs the lower
+///    16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMULLW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16].
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits
+///    of the products of both parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_mullo_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Left-shifts each 16-bit signed integer element of the first
+///    parameter, which is a 64-bit integer vector of [4 x i16], by the number
+///    of bits specified by the second parameter, which is a 64-bit integer. The
+///    lower 16 bits of the results are packed into a 64-bit integer vector of
+///    [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLW instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [4 x i16].
+/// \param __count
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted
+///    values. If __count is greater than or equal to 16, the result is set to all 0.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sll_pi16(__m64 __m, __m64 __count)
 {
     return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count);
 }
 
+/// \brief Left-shifts each 16-bit signed integer element of a 64-bit integer
+///    vector of [4 x i16] by the number of bits specified by a 32-bit integer.
+///    The lower 16 bits of the results are packed into a 64-bit integer vector
+///    of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLW instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [4 x i16].
+/// \param __count
+///    A 32-bit integer value.
+/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted
+///    values. If __count is greater than or equal to 16, the result is set to all 0.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_slli_pi16(__m64 __m, int __count)
 {
     return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
 }
 
+/// \brief Left-shifts each 32-bit signed integer element of the first
+///    parameter, which is a 64-bit integer vector of [2 x i32], by the number
+///    of bits specified by the second parameter, which is a 64-bit integer. The
+///    lower 32 bits of the results are packed into a 64-bit integer vector of
+///    [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLD instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [2 x i32].
+/// \param __count
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted
+///    values. If __count is greater than or equal to 32, the result is set to all 0.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sll_pi32(__m64 __m, __m64 __count)
 {
     return (__m64)__builtin_ia32_pslld((__v2si)__m, __count);
 }
 
+/// \brief Left-shifts each 32-bit signed integer element of a 64-bit integer
+///    vector of [2 x i32] by the number of bits specified by a 32-bit integer.
+///    The lower 32 bits of the results are packed into a 64-bit integer vector
+///    of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLD instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [2 x i32].
+/// \param __count
+///    A 32-bit integer value.
+/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted
+///    values. If __count is greater than or equal to 32, the result is set to all 0.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_slli_pi32(__m64 __m, int __count)
 {
     return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count);
 }
 
+/// \brief Left-shifts the first 64-bit integer parameter by the number of bits
+///    specified by the second 64-bit integer parameter. The lower 64 bits of
+///    the result are returned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLQ instruction.
+///
+/// \param __m
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \param __count
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector containing the left-shifted value. If
+///     __count is greater than or equal to 64, the result is set to 0.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sll_si64(__m64 __m, __m64 __count)
 {
-    return (__m64)__builtin_ia32_psllq(__m, __count);
+    return (__m64)__builtin_ia32_psllq((__v1di)__m, __count);
 }
 
+/// \brief Left-shifts the first parameter, which is a 64-bit integer, by the
+///    number of bits specified by the second parameter, which is a 32-bit
+///    integer. The lower 64 bits of the result are returned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLQ instruction.
+///
+/// \param __m
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \param __count
+///    A 32-bit integer value.
+/// \returns A 64-bit integer vector containing the left-shifted value. If
+///     __count is greater than or equal to 64, the result is set to 0.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_slli_si64(__m64 __m, int __count)
 {
-    return (__m64)__builtin_ia32_psllqi(__m, __count);
+    return (__m64)__builtin_ia32_psllqi((__v1di)__m, __count);
 }
 
+/// \brief Right-shifts each 16-bit integer element of the first parameter,
+///    which is a 64-bit integer vector of [4 x i16], by the number of bits
+///    specified by the second parameter, which is a 64-bit integer. High-order
+///    bits are filled with the sign bit of the initial value of each 16-bit
+///    element. The 16-bit results are packed into a 64-bit integer vector of
+///    [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRAW instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [4 x i16].
+/// \param __count
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sra_pi16(__m64 __m, __m64 __count)
 {
     return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
 }
 
+/// \brief Right-shifts each 16-bit integer element of a 64-bit integer vector
+///    of [4 x i16] by the number of bits specified by a 32-bit integer.
+///    High-order bits are filled with the sign bit of the initial value of each
+///    16-bit element. The 16-bit results are packed into a 64-bit integer
+///    vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRAW instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [4 x i16].
+/// \param __count
+///    A 32-bit integer value.
+/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_srai_pi16(__m64 __m, int __count)
 {
     return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count);
 }
 
+/// \brief Right-shifts each 32-bit integer element of the first parameter,
+///    which is a 64-bit integer vector of [2 x i32], by the number of bits
+///    specified by the second parameter, which is a 64-bit integer. High-order
+///    bits are filled with the sign bit of the initial value of each 32-bit
+///    element. The 32-bit results are packed into a 64-bit integer vector of
+///    [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRAD instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [2 x i32].
+/// \param __count
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sra_pi32(__m64 __m, __m64 __count)
 {
     return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
 }
 
+/// \brief Right-shifts each 32-bit integer element of a 64-bit integer vector
+///    of [2 x i32] by the number of bits specified by a 32-bit integer.
+///    High-order bits are filled with the sign bit of the initial value of each
+///    32-bit element. The 32-bit results are packed into a 64-bit integer
+///    vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRAD instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [2 x i32].
+/// \param __count
+///    A 32-bit integer value.
+/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_srai_pi32(__m64 __m, int __count)
 {
     return (__m64)__builtin_ia32_psradi((__v2si)__m, __count);
 }
 
+/// \brief Right-shifts each 16-bit integer element of the first parameter,
+///    which is a 64-bit integer vector of [4 x i16], by the number of bits
+///    specified by the second parameter, which is a 64-bit integer. High-order
+///    bits are cleared. The 16-bit results are packed into a 64-bit integer
+///    vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLW instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [4 x i16].
+/// \param __count
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_srl_pi16(__m64 __m, __m64 __count)
 {
     return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
 }
 
+/// \brief Right-shifts each 16-bit integer element of a 64-bit integer vector
+///    of [4 x i16] by the number of bits specified by a 32-bit integer.
+///    High-order bits are cleared. The 16-bit results are packed into a 64-bit
+///    integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLW instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [4 x i16].
+/// \param __count
+///    A 32-bit integer value.
+/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_srli_pi16(__m64 __m, int __count)
 {
     return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
 }
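
A brief sketch contrasting the arithmetic (_mm_srai_pi16) and logical (_mm_srli_pi16) right shifts on the same data (_mm_set1_pi16 is defined further down in this header):

    __m64 v   = _mm_set1_pi16((short)0xFF00);  /* each lane: -256 */
    __m64 sra = _mm_srai_pi16(v, 4);           /* sign-filled: each lane 0xFFF0 (-16) */
    __m64 srl = _mm_srli_pi16(v, 4);           /* zero-filled: each lane 0x0FF0       */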
 
+/// \brief Right-shifts each 32-bit integer element of the first parameter,
+///    which is a 64-bit integer vector of [2 x i32], by the number of bits
+///    specified by the second parameter, which is a 64-bit integer. High-order
+///    bits are cleared. The 32-bit results are packed into a 64-bit integer
+///    vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLD instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [2 x i32].
+/// \param __count
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_srl_pi32(__m64 __m, __m64 __count)
 {
     return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
 }
 
+/// \brief Right-shifts each 32-bit integer element of a 64-bit integer vector
+///    of [2 x i32] by the number of bits specified by a 32-bit integer.
+///    High-order bits are cleared. The 32-bit results are packed into a 64-bit
+///    integer vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLD instruction.
+///
+/// \param __m
+///    A 64-bit integer vector of [2 x i32].
+/// \param __count
+///    A 32-bit integer value.
+/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_srli_pi32(__m64 __m, int __count)
 {
     return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count);
 }
 
+/// \brief Right-shifts the first 64-bit integer parameter by the number of bits
+///    specified by the second 64-bit integer parameter. High-order bits are
+///    cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLQ instruction.
+///
+/// \param __m
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \param __count
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector containing the right-shifted value.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_srl_si64(__m64 __m, __m64 __count)
 {
-    return (__m64)__builtin_ia32_psrlq(__m, __count);
+    return (__m64)__builtin_ia32_psrlq((__v1di)__m, __count);
 }
 
+/// \brief Right-shifts the first parameter, which is a 64-bit integer, by the
+///    number of bits specified by the second parameter, which is a 32-bit
+///    integer. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLQ instruction.
+///
+/// \param __m
+///    A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \param __count
+///    A 32-bit integer value.
+/// \returns A 64-bit integer vector containing the right-shifted value.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_srli_si64(__m64 __m, int __count)
 {
-    return (__m64)__builtin_ia32_psrlqi(__m, __count);
+    return (__m64)__builtin_ia32_psrlqi((__v1di)__m, __count);
 }
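
A minimal, hypothetical usage sketch for the logical-shift intrinsics documented above (not part of this patch; values and names are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <x86intrin.h>

    int main(void) {
      /* Pack four 16-bit lanes; _mm_set_pi16 takes the highest lane first. */
      __m64 v = _mm_set_pi16(0x8000, 0x4000, 0x0200, 0x0010);
      __m64 r = _mm_srli_pi16(v, 4);   /* logical shift: high-order bits are cleared */
      unsigned short out[4];
      memcpy(out, &r, sizeof out);     /* out[0] is the lowest lane */
      _mm_empty();                     /* leave MMX state before any x87 floating point */
      printf("%04x %04x %04x %04x\n", out[3], out[2], out[1], out[0]);
      return 0;
    }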
 
+/// \brief Performs a bitwise AND of two 64-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PAND instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector.
+/// \param __m2
+///    A 64-bit integer vector.
+/// \returns A 64-bit integer vector containing the bitwise AND of both
+///    parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_and_si64(__m64 __m1, __m64 __m2)
 {
-    return __builtin_ia32_pand(__m1, __m2);
+    return __builtin_ia32_pand((__v1di)__m1, (__v1di)__m2);
 }
 
+/// \brief Performs a bitwise NOT of the first 64-bit integer vector, and then
+///    performs a bitwise AND of the intermediate result and the second 64-bit
+///    integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PANDN instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector. The one's complement of this parameter is used
+///    in the bitwise AND.
+/// \param __m2
+///    A 64-bit integer vector.
+/// \returns A 64-bit integer vector containing the bitwise AND of the second
+///    parameter and the one's complement of the first parameter.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_andnot_si64(__m64 __m1, __m64 __m2)
 {
-    return __builtin_ia32_pandn(__m1, __m2);
+    return __builtin_ia32_pandn((__v1di)__m1, (__v1di)__m2);
 }
 
+/// \brief Performs a bitwise OR of two 64-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POR instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector.
+/// \param __m2
+///    A 64-bit integer vector.
+/// \returns A 64-bit integer vector containing the bitwise OR of both
+///    parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_or_si64(__m64 __m1, __m64 __m2)
 {
-    return __builtin_ia32_por(__m1, __m2);
+    return __builtin_ia32_por((__v1di)__m1, (__v1di)__m2);
 }
 
+/// \brief Performs a bitwise exclusive OR of two 64-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PXOR instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector.
+/// \param __m2
+///    A 64-bit integer vector.
+/// \returns A 64-bit integer vector containing the bitwise exclusive OR of both
+///    parameters.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_xor_si64(__m64 __m1, __m64 __m2)
 {
-    return __builtin_ia32_pxor(__m1, __m2);
+    return __builtin_ia32_pxor((__v1di)__m1, (__v1di)__m2);
 }
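
A common use of PAND/PANDN/POR together is a bitwise select: take bits from one source where a mask is set and from the other where it is clear. A hypothetical helper sketch (assumed usage, not part of the patch):

    #include <x86intrin.h>

    static __inline__ __m64 select_si64(__m64 __mask, __m64 __a, __m64 __b)
    {
        /* For every bit: (__mask & __a) | (~__mask & __b).  With a per-lane
           all-ones/all-zeros mask (e.g. from _mm_cmpgt_pi16) this picks whole
           lanes from __a or __b. */
        return _mm_or_si64(_mm_and_si64(__mask, __a),
                           _mm_andnot_si64(__mask, __b));
    }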
 
+/// \brief Compares the 8-bit integer elements of two 64-bit integer vectors of
+///    [8 x i8] to determine if the element of the first vector is equal to the
+///    corresponding element of the second vector. The comparison yields 0 for
+///    false, 0xFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPEQB instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [8 x i8].
+/// \param __m2
+///    A 64-bit integer vector of [8 x i8].
+/// \returns A 64-bit integer vector of [8 x i8] containing the comparison
+///    results.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2);
 }
 
+/// \brief Compares the 16-bit integer elements of two 64-bit integer vectors of
+///    [4 x i16] to determine if the element of the first vector is equal to the
+///    corresponding element of the second vector. The comparison yields 0 for
+///    false, 0xFFFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPEQW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16].
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the comparison
+///    results.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Compares the 32-bit integer elements of two 64-bit integer vectors of
+///    [2 x i32] to determine if the element of the first vector is equal to the
+///    corresponding element of the second vector. The comparison yields 0 for
+///    false, 0xFFFFFFFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPEQD instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [2 x i32].
+/// \param __m2
+///    A 64-bit integer vector of [2 x i32].
+/// \returns A 64-bit integer vector of [2 x i32] containing the comparison
+///    results.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2);
 }
 
+/// \brief Compares the 8-bit integer elements of two 64-bit integer vectors of
+///    [8 x i8] to determine if the element of the first vector is greater than
+///    the corresponding element of the second vector. The comparison yields 0
+///    for false, 0xFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPGTB instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [8 x i8].
+/// \param __m2
+///    A 64-bit integer vector of [8 x i8].
+/// \returns A 64-bit integer vector of [8 x i8] containing the comparison
+///    results.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2);
 }
 
+/// \brief Compares the 16-bit integer elements of two 64-bit integer vectors of
+///    [4 x i16] to determine if the element of the first vector is greater than
+///    the corresponding element of the second vector. The comparison yields 0
+///    for false, 0xFFFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPGTW instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [4 x i16].
+/// \param __m2
+///    A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the comparison
+///    results.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2);
 }
 
+/// \brief Compares the 32-bit integer elements of two 64-bit integer vectors of
+///    [2 x i32] to determine if the element of the first vector is greater than
+///    the corresponding element of the second vector. The comparison yields 0
+///    for false, 0xFFFFFFFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPGTD instruction.
+///
+/// \param __m1
+///    A 64-bit integer vector of [2 x i32].
+/// \param __m2
+///    A 64-bit integer vector of [2 x i32].
+/// \returns A 64-bit integer vector of [2 x i32] containing the comparison
+///    results.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
 {
     return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2);
 }
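
Combining the greater-than comparison with the PAND/PANDN/POR select idiom gives a per-lane maximum. A hypothetical sketch for [4 x i16] (not part of the patch):

    #include <x86intrin.h>

    static __inline__ __m64 max_pi16(__m64 __a, __m64 __b)
    {
        __m64 __gt = _mm_cmpgt_pi16(__a, __b);   /* 0xFFFF where __a > __b, else 0x0000 */
        return _mm_or_si64(_mm_and_si64(__gt, __a),
                           _mm_andnot_si64(__gt, __b));
    }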
 
+/// \brief Constructs a 64-bit integer vector initialized to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VXORPS / XORPS instruction.
+///
+/// \returns An initialized 64-bit integer vector with all elements set to zero.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_setzero_si64(void)
 {
     return (__m64){ 0LL };
 }
 
+/// \brief Constructs a 64-bit integer vector initialized with the specified
+///    32-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __i1
+///    A 32-bit integer value used to initialize the upper 32 bits of the
+///    result.
+/// \param __i0
+///    A 32-bit integer value used to initialize the lower 32 bits of the
+///    result.
+/// \returns An initialized 64-bit integer vector.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_set_pi32(int __i1, int __i0)
 {
     return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1);
 }
 
+/// \brief Constructs a 64-bit integer vector initialized with the specified
+///    16-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __s3
+///    A 16-bit integer value used to initialize bits [63:48] of the result.
+/// \param __s2
+///    A 16-bit integer value used to initialize bits [47:32] of the result.
+/// \param __s1
+///    A 16-bit integer value used to initialize bits [31:16] of the result.
+/// \param __s0
+///    A 16-bit integer value used to initialize bits [15:0] of the result.
+/// \returns An initialized 64-bit integer vector.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_set_pi16(short __s3, short __s2, short __s1, short __s0)
 {
     return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3);
 }
 
+/// \brief Constructs a 64-bit integer vector initialized with the specified
+///    8-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __b7
+///    An 8-bit integer value used to initialize bits [63:56] of the result.
+/// \param __b6
+///    An 8-bit integer value used to initialize bits [55:48] of the result.
+/// \param __b5
+///    An 8-bit integer value used to initialize bits [47:40] of the result.
+/// \param __b4
+///    An 8-bit integer value used to initialize bits [39:32] of the result.
+/// \param __b3
+///    An 8-bit integer value used to initialize bits [31:24] of the result.
+/// \param __b2
+///    An 8-bit integer value used to initialize bits [23:16] of the result.
+/// \param __b1
+///    An 8-bit integer value used to initialize bits [15:8] of the result.
+/// \param __b0
+///    An 8-bit integer value used to initialize bits [7:0] of the result.
+/// \returns An initialized 64-bit integer vector.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
             char __b1, char __b0)
@@ -401,36 +1350,129 @@
                                                __b4, __b5, __b6, __b7);
 }
 
+/// \brief Constructs a 64-bit integer vector of [2 x i32], with each of the
+///    32-bit integer vector elements set to the specified 32-bit integer
+///    value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSHUFD / PSHUFD instruction.
+///
+/// \param __i
+///    A 32-bit integer value used to initialize each vector element of the
+///    result.
+/// \returns An initialized 64-bit integer vector of [2 x i32].
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_set1_pi32(int __i)
 {
     return _mm_set_pi32(__i, __i);
 }
 
+/// \brief Constructs a 64-bit integer vector of [4 x i16], with each of the
+///    16-bit integer vector elements set to the specified 16-bit integer
+///    value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSHUFLW / PSHUFLW instruction.
+///
+/// \param __w
+///    A 16-bit integer value used to initialize each vector element of the
+///    result.
+/// \returns An initialized 64-bit integer vector of [4 x i16].
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_set1_pi16(short __w)
 {
     return _mm_set_pi16(__w, __w, __w, __w);
 }
 
+/// \brief Constructs a 64-bit integer vector of [8 x i8], with each of the
+///    8-bit integer vector elements set to the specified 8-bit integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKLBW + VPSHUFLW / \c PUNPCKLBW +
+///    PSHUFLW instructions.
+///
+/// \param __b
+///    An 8-bit integer value used to initialize each vector element of the
+///    result.
+/// \returns An initialized 64-bit integer vector of [8 x i8].
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_set1_pi8(char __b)
 {
     return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b);
 }
 
+/// \brief Constructs a 64-bit integer vector, initialized in reverse order with
+///    the specified 32-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __i0
+///    A 32-bit integer value used to initialize the lower 32 bits of the
+///    result.
+/// \param __i1
+///    A 32-bit integer value used to initialize the upper 32 bits of the
+///    result.
+/// \returns An initialized 64-bit integer vector.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_setr_pi32(int __i0, int __i1)
 {
     return _mm_set_pi32(__i1, __i0);
 }
 
+/// \brief Constructs a 64-bit integer vector, initialized in reverse order with
+///    the specified 16-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __w0
+///    A 16-bit integer value used to initialize bits [15:0] of the result.
+/// \param __w1
+///    A 16-bit integer value used to initialize bits [31:16] of the result.
+/// \param __w2
+///    A 16-bit integer value used to initialize bits [47:32] of the result.
+/// \param __w3
+///    A 16-bit integer value used to initialize bits [63:48] of the result.
+/// \returns An initialized 64-bit integer vector.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3)
 {
     return _mm_set_pi16(__w3, __w2, __w1, __w0);
 }
 
+/// \brief Constructs a 64-bit integer vector, initialized in reverse order with
+///    the specified 8-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __b0
+///    An 8-bit integer value used to initialize bits [7:0] of the result.
+/// \param __b1
+///    An 8-bit integer value used to initialize bits [15:8] of the result.
+/// \param __b2
+///    An 8-bit integer value used to initialize bits [23:16] of the result.
+/// \param __b3
+///    An 8-bit integer value used to initialize bits [31:24] of the result.
+/// \param __b4
+///    An 8-bit integer value used to initialize bits [39:32] of the result.
+/// \param __b5
+///    An 8-bit integer value used to initialize bits [47:40] of the result.
+/// \param __b6
+///    An 8-bit integer value used to initialize bits [55:48] of the result.
+/// \param __b7
+///    An 8-bit integer value used to initialize bits [63:56] of the result.
+/// \returns An initialized 64-bit integer vector.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
              char __b6, char __b7)
diff --git a/renderscript/clang-include/mwaitxintrin.h b/renderscript/clang-include/mwaitxintrin.h
new file mode 100644
index 0000000..635f2ac
--- /dev/null
+++ b/renderscript/clang-include/mwaitxintrin.h
@@ -0,0 +1,47 @@
+/*===---- mwaitxintrin.h - MONITORX/MWAITX intrinsics ----------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <mwaitxintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef _MWAITXINTRIN_H
+#define _MWAITXINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__,  __target__("mwaitx")))
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_monitorx(void const * __p, unsigned __extensions, unsigned __hints)
+{
+  __builtin_ia32_monitorx((void *)__p, __extensions, __hints);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock)
+{
+  __builtin_ia32_mwaitx(__extensions, __hints, __clock);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* _MWAITXINTRIN_H */
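
A hypothetical usage sketch for the two intrinsics above (assumed usage, not part of this patch): spin-waiting on a flag with MONITORX/MWAITX. This requires a CPU with the mwaitx feature and compiling with a matching target option such as -mmwaitx.

    #include <x86intrin.h>

    static void wait_for_flag(volatile unsigned *flag)
    {
      while (*flag == 0) {
        /* Arm the monitor on the cache line containing *flag. */
        _mm_monitorx((const void *)flag, 0, 0);
        /* Re-check after arming to avoid missing a store that already happened. */
        if (*flag != 0)
          break;
        /* Wait until another core writes the monitored line (or another wakeup event). */
        _mm_mwaitx(0, 0, 0);
      }
    }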
diff --git a/renderscript/clang-include/opencl-c.h b/renderscript/clang-include/opencl-c.h
new file mode 100644
index 0000000..8029274
--- /dev/null
+++ b/renderscript/clang-include/opencl-c.h
@@ -0,0 +1,16962 @@
+//===--- opencl-c.h - OpenCL C language builtin function header -----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _OPENCL_H_
+#define _OPENCL_H_
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#ifndef cl_khr_depth_images
+#define cl_khr_depth_images
+#endif //cl_khr_depth_images
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#define __ovld __attribute__((overloadable))
+
+// Optimizations
+#define __purefn __attribute__((pure))
+#define __cnfn __attribute__((const))
+
+// built-in scalar data types:
+
+/**
+ * An unsigned 8-bit integer.
+ */
+typedef unsigned char uchar;
+
+/**
+ * An unsigned 16-bit integer.
+ */
+typedef unsigned short ushort;
+
+/**
+ * An unsigned 32-bit integer.
+ */
+typedef unsigned int uint;
+
+/**
+ * An unsigned 64-bit integer.
+ */
+typedef unsigned long ulong;
+
+/**
+ * The unsigned integer type of the result of the sizeof operator. This
+ * is a 32-bit unsigned integer if CL_DEVICE_ADDRESS_BITS
+ * defined in table 4.3 is 32 bits and is a 64-bit unsigned integer if
+ * CL_DEVICE_ADDRESS_BITS is 64 bits.
+ */
+typedef __SIZE_TYPE__ size_t;
+
+/**
+ * A signed integer type that is the result of subtracting two pointers.
+ * This is a 32-bit signed integer if CL_DEVICE_ADDRESS_BITS
+ * defined in table 4.3 is 32 bits and is a 64-bit signed integer if
+ * CL_DEVICE_ADDRESS_BITS is 64 bits.
+ */
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+
+/**
+* A signed integer type with the property that any valid pointer to
+* void can be converted to this type, then converted back to pointer
+* to void, and the result will compare equal to the original pointer.
+*/
+typedef __INTPTR_TYPE__ intptr_t;
+
+/**
+* An unsigned integer type with the property that any valid pointer to
+* void can be converted to this type, then converted back to pointer
+* to void, and the result will compare equal to the original pointer.
+*/
+typedef __UINTPTR_TYPE__ uintptr_t;
+
+// built-in vector data types:
+typedef char char2 __attribute__((ext_vector_type(2)));
+typedef char char3 __attribute__((ext_vector_type(3)));
+typedef char char4 __attribute__((ext_vector_type(4)));
+typedef char char8 __attribute__((ext_vector_type(8)));
+typedef char char16 __attribute__((ext_vector_type(16)));
+typedef uchar uchar2 __attribute__((ext_vector_type(2)));
+typedef uchar uchar3 __attribute__((ext_vector_type(3)));
+typedef uchar uchar4 __attribute__((ext_vector_type(4)));
+typedef uchar uchar8 __attribute__((ext_vector_type(8)));
+typedef uchar uchar16 __attribute__((ext_vector_type(16)));
+typedef short short2 __attribute__((ext_vector_type(2)));
+typedef short short3 __attribute__((ext_vector_type(3)));
+typedef short short4 __attribute__((ext_vector_type(4)));
+typedef short short8 __attribute__((ext_vector_type(8)));
+typedef short short16 __attribute__((ext_vector_type(16)));
+typedef ushort ushort2 __attribute__((ext_vector_type(2)));
+typedef ushort ushort3 __attribute__((ext_vector_type(3)));
+typedef ushort ushort4 __attribute__((ext_vector_type(4)));
+typedef ushort ushort8 __attribute__((ext_vector_type(8)));
+typedef ushort ushort16 __attribute__((ext_vector_type(16)));
+typedef int int2 __attribute__((ext_vector_type(2)));
+typedef int int3 __attribute__((ext_vector_type(3)));
+typedef int int4 __attribute__((ext_vector_type(4)));
+typedef int int8 __attribute__((ext_vector_type(8)));
+typedef int int16 __attribute__((ext_vector_type(16)));
+typedef uint uint2 __attribute__((ext_vector_type(2)));
+typedef uint uint3 __attribute__((ext_vector_type(3)));
+typedef uint uint4 __attribute__((ext_vector_type(4)));
+typedef uint uint8 __attribute__((ext_vector_type(8)));
+typedef uint uint16 __attribute__((ext_vector_type(16)));
+typedef long long2 __attribute__((ext_vector_type(2)));
+typedef long long3 __attribute__((ext_vector_type(3)));
+typedef long long4 __attribute__((ext_vector_type(4)));
+typedef long long8 __attribute__((ext_vector_type(8)));
+typedef long long16 __attribute__((ext_vector_type(16)));
+typedef ulong ulong2 __attribute__((ext_vector_type(2)));
+typedef ulong ulong3 __attribute__((ext_vector_type(3)));
+typedef ulong ulong4 __attribute__((ext_vector_type(4)));
+typedef ulong ulong8 __attribute__((ext_vector_type(8)));
+typedef ulong ulong16 __attribute__((ext_vector_type(16)));
+typedef float float2 __attribute__((ext_vector_type(2)));
+typedef float float3 __attribute__((ext_vector_type(3)));
+typedef float float4 __attribute__((ext_vector_type(4)));
+typedef float float8 __attribute__((ext_vector_type(8)));
+typedef float float16 __attribute__((ext_vector_type(16)));
+#ifdef cl_khr_fp16
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+typedef half half2 __attribute__((ext_vector_type(2)));
+typedef half half3 __attribute__((ext_vector_type(3)));
+typedef half half4 __attribute__((ext_vector_type(4)));
+typedef half half8 __attribute__((ext_vector_type(8)));
+typedef half half16 __attribute__((ext_vector_type(16)));
+#endif
+#ifdef cl_khr_fp64
+#if __OPENCL_C_VERSION__ < CL_VERSION_1_2
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
+#endif
+typedef double double2 __attribute__((ext_vector_type(2)));
+typedef double double3 __attribute__((ext_vector_type(3)));
+typedef double double4 __attribute__((ext_vector_type(4)));
+typedef double double8 __attribute__((ext_vector_type(8)));
+typedef double double16 __attribute__((ext_vector_type(16)));
+#endif
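
A hypothetical OpenCL C kernel sketch using the vector types declared above (illustrative only; get_global_id and the address-space qualifiers are declared further down in this header):

    __kernel void scale4(__global float4 *buf, float s)
    {
        size_t i = get_global_id(0);
        buf[i] = buf[i] * s;   /* component-wise multiply of a float4 by a scalar */
    }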
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#define NULL ((void*)0)
+#endif
+
+/**
+ * Value of maximum non-infinite single-precision floating-point
+ * number.
+ */
+#define MAXFLOAT 0x1.fffffep127f
+
+/**
+ * A positive float constant expression. HUGE_VALF evaluates
+ * to +infinity. Used as an error value returned by the built-in
+ * math functions.
+ */
+#define HUGE_VALF (__builtin_huge_valf())
+
+/**
+ * A positive double constant expression. HUGE_VAL evaluates
+ * to +infinity. Used as an error value returned by the built-in
+ * math functions.
+ */
+#define HUGE_VAL (__builtin_huge_val())
+
+/**
+ * A constant expression of type float representing positive or
+ * unsigned infinity.
+ */
+#define INFINITY (__builtin_inff())
+
+/**
+ * A constant expression of type float representing a quiet NaN.
+ */
+#define NAN as_float(INT_MAX)
+
+#define FP_ILOGB0    INT_MIN
+#define FP_ILOGBNAN    INT_MAX
+
+#define FLT_DIG 6
+#define FLT_MANT_DIG 24
+#define FLT_MAX_10_EXP +38
+#define FLT_MAX_EXP +128
+#define FLT_MIN_10_EXP -37
+#define FLT_MIN_EXP -125
+#define FLT_RADIX 2
+#define FLT_MAX 0x1.fffffep127f
+#define FLT_MIN 0x1.0p-126f
+#define FLT_EPSILON 0x1.0p-23f
+
+#define M_E_F         2.71828182845904523536028747135266250f
+#define M_LOG2E_F     1.44269504088896340735992468100189214f
+#define M_LOG10E_F    0.434294481903251827651128918916605082f
+#define M_LN2_F       0.693147180559945309417232121458176568f
+#define M_LN10_F      2.30258509299404568401799145468436421f
+#define M_PI_F        3.14159265358979323846264338327950288f
+#define M_PI_2_F      1.57079632679489661923132169163975144f
+#define M_PI_4_F      0.785398163397448309615660845819875721f
+#define M_1_PI_F      0.318309886183790671537767526745028724f
+#define M_2_PI_F      0.636619772367581343075535053490057448f
+#define M_2_SQRTPI_F  1.12837916709551257389615890312154517f
+#define M_SQRT2_F     1.41421356237309504880168872420969808f
+#define M_SQRT1_2_F   0.707106781186547524400844362104849039f
+
+#define DBL_DIG 15
+#define DBL_MANT_DIG 53
+#define DBL_MAX_10_EXP +308
+#define DBL_MAX_EXP +1024
+#define DBL_MIN_10_EXP -307
+#define DBL_MIN_EXP -1021
+#define DBL_RADIX 2
+#define DBL_MAX 0x1.fffffffffffffp1023
+#define DBL_MIN 0x1.0p-1022
+#define DBL_EPSILON 0x1.0p-52
+
+#define M_E           0x1.5bf0a8b145769p+1
+#define M_LOG2E       0x1.71547652b82fep+0
+#define M_LOG10E      0x1.bcb7b1526e50ep-2
+#define M_LN2         0x1.62e42fefa39efp-1
+#define M_LN10        0x1.26bb1bbb55516p+1
+#define M_PI          0x1.921fb54442d18p+1
+#define M_PI_2        0x1.921fb54442d18p+0
+#define M_PI_4        0x1.921fb54442d18p-1
+#define M_1_PI        0x1.45f306dc9c883p-2
+#define M_2_PI        0x1.45f306dc9c883p-1
+#define M_2_SQRTPI    0x1.20dd750429b6dp+0
+#define M_SQRT2       0x1.6a09e667f3bcdp+0
+#define M_SQRT1_2     0x1.6a09e667f3bcdp-1
+
+#ifdef cl_khr_fp16
+
+#define HALF_DIG 3
+#define HALF_MANT_DIG 11
+#define HALF_MAX_10_EXP +4
+#define HALF_MAX_EXP +16
+#define HALF_MIN_10_EXP -4
+#define HALF_MIN_EXP -13
+#define HALF_RADIX 2
+#define HALF_MAX ((0x1.ffcp15h))
+#define HALF_MIN ((0x1.0p-14h))
+#define HALF_EPSILON ((0x1.0p-10h))
+
+#define M_E_H         2.71828182845904523536028747135266250h
+#define M_LOG2E_H     1.44269504088896340735992468100189214h
+#define M_LOG10E_H    0.434294481903251827651128918916605082h
+#define M_LN2_H       0.693147180559945309417232121458176568h
+#define M_LN10_H      2.30258509299404568401799145468436421h
+#define M_PI_H        3.14159265358979323846264338327950288h
+#define M_PI_2_H      1.57079632679489661923132169163975144h
+#define M_PI_4_H      0.785398163397448309615660845819875721h
+#define M_1_PI_H      0.318309886183790671537767526745028724h
+#define M_2_PI_H      0.636619772367581343075535053490057448h
+#define M_2_SQRTPI_H  1.12837916709551257389615890312154517h
+#define M_SQRT2_H     1.41421356237309504880168872420969808h
+#define M_SQRT1_2_H   0.707106781186547524400844362104849039h
+
+#endif //cl_khr_fp16
+
+#define CHAR_BIT    8
+#define SCHAR_MAX  127
+#define SCHAR_MIN  (-128)
+#define UCHAR_MAX  255
+#define CHAR_MAX  SCHAR_MAX
+#define CHAR_MIN  SCHAR_MIN
+#define USHRT_MAX  65535
+#define SHRT_MAX  32767
+#define SHRT_MIN  (-32768)
+#define UINT_MAX  0xffffffff
+#define INT_MAX    2147483647
+#define INT_MIN    (-2147483647-1)
+#define ULONG_MAX  0xffffffffffffffffUL
+#define LONG_MAX  0x7fffffffffffffffL
+#define LONG_MIN  (-0x7fffffffffffffffL-1)
+
+// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions
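
A hypothetical illustration of the conversion naming scheme declared below (_sat saturates to the destination range; _rte/_rtz/_rtp/_rtn select round-to-nearest-even, toward zero, toward +infinity, and toward -infinity); not part of the header:

    __kernel void convert_demo(__global int *out)
    {
        uchar a = convert_uchar_sat_rte(300.7f);  /* round to nearest even, then saturate: 255 */
        char  b = convert_char_sat(-200);         /* saturate to the char minimum: -128        */
        int   c = convert_int_rtz(2.9f);          /* round toward zero: 2                      */
        out[0] = a; out[1] = b; out[2] = c;
    }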
+
+char __ovld __cnfn convert_char_rte(char);
+char __ovld __cnfn convert_char_sat_rte(char);
+char __ovld __cnfn convert_char_rtz(char);
+char __ovld __cnfn convert_char_sat_rtz(char);
+char __ovld __cnfn convert_char_rtp(char);
+char __ovld __cnfn convert_char_sat_rtp(char);
+char __ovld __cnfn convert_char_rtn(char);
+char __ovld __cnfn convert_char_sat_rtn(char);
+char __ovld __cnfn convert_char(char);
+char __ovld __cnfn convert_char_sat(char);
+char __ovld __cnfn convert_char_rte(uchar);
+char __ovld __cnfn convert_char_sat_rte(uchar);
+char __ovld __cnfn convert_char_rtz(uchar);
+char __ovld __cnfn convert_char_sat_rtz(uchar);
+char __ovld __cnfn convert_char_rtp(uchar);
+char __ovld __cnfn convert_char_sat_rtp(uchar);
+char __ovld __cnfn convert_char_rtn(uchar);
+char __ovld __cnfn convert_char_sat_rtn(uchar);
+char __ovld __cnfn convert_char(uchar);
+char __ovld __cnfn convert_char_sat(uchar);
+char __ovld __cnfn convert_char_rte(short);
+char __ovld __cnfn convert_char_sat_rte(short);
+char __ovld __cnfn convert_char_rtz(short);
+char __ovld __cnfn convert_char_sat_rtz(short);
+char __ovld __cnfn convert_char_rtp(short);
+char __ovld __cnfn convert_char_sat_rtp(short);
+char __ovld __cnfn convert_char_rtn(short);
+char __ovld __cnfn convert_char_sat_rtn(short);
+char __ovld __cnfn convert_char(short);
+char __ovld __cnfn convert_char_sat(short);
+char __ovld __cnfn convert_char_rte(ushort);
+char __ovld __cnfn convert_char_sat_rte(ushort);
+char __ovld __cnfn convert_char_rtz(ushort);
+char __ovld __cnfn convert_char_sat_rtz(ushort);
+char __ovld __cnfn convert_char_rtp(ushort);
+char __ovld __cnfn convert_char_sat_rtp(ushort);
+char __ovld __cnfn convert_char_rtn(ushort);
+char __ovld __cnfn convert_char_sat_rtn(ushort);
+char __ovld __cnfn convert_char(ushort);
+char __ovld __cnfn convert_char_sat(ushort);
+char __ovld __cnfn convert_char_rte(int);
+char __ovld __cnfn convert_char_sat_rte(int);
+char __ovld __cnfn convert_char_rtz(int);
+char __ovld __cnfn convert_char_sat_rtz(int);
+char __ovld __cnfn convert_char_rtp(int);
+char __ovld __cnfn convert_char_sat_rtp(int);
+char __ovld __cnfn convert_char_rtn(int);
+char __ovld __cnfn convert_char_sat_rtn(int);
+char __ovld __cnfn convert_char(int);
+char __ovld __cnfn convert_char_sat(int);
+char __ovld __cnfn convert_char_rte(uint);
+char __ovld __cnfn convert_char_sat_rte(uint);
+char __ovld __cnfn convert_char_rtz(uint);
+char __ovld __cnfn convert_char_sat_rtz(uint);
+char __ovld __cnfn convert_char_rtp(uint);
+char __ovld __cnfn convert_char_sat_rtp(uint);
+char __ovld __cnfn convert_char_rtn(uint);
+char __ovld __cnfn convert_char_sat_rtn(uint);
+char __ovld __cnfn convert_char(uint);
+char __ovld __cnfn convert_char_sat(uint);
+char __ovld __cnfn convert_char_rte(long);
+char __ovld __cnfn convert_char_sat_rte(long);
+char __ovld __cnfn convert_char_rtz(long);
+char __ovld __cnfn convert_char_sat_rtz(long);
+char __ovld __cnfn convert_char_rtp(long);
+char __ovld __cnfn convert_char_sat_rtp(long);
+char __ovld __cnfn convert_char_rtn(long);
+char __ovld __cnfn convert_char_sat_rtn(long);
+char __ovld __cnfn convert_char(long);
+char __ovld __cnfn convert_char_sat(long);
+char __ovld __cnfn convert_char_rte(ulong);
+char __ovld __cnfn convert_char_sat_rte(ulong);
+char __ovld __cnfn convert_char_rtz(ulong);
+char __ovld __cnfn convert_char_sat_rtz(ulong);
+char __ovld __cnfn convert_char_rtp(ulong);
+char __ovld __cnfn convert_char_sat_rtp(ulong);
+char __ovld __cnfn convert_char_rtn(ulong);
+char __ovld __cnfn convert_char_sat_rtn(ulong);
+char __ovld __cnfn convert_char(ulong);
+char __ovld __cnfn convert_char_sat(ulong);
+char __ovld __cnfn convert_char_rte(float);
+char __ovld __cnfn convert_char_sat_rte(float);
+char __ovld __cnfn convert_char_rtz(float);
+char __ovld __cnfn convert_char_sat_rtz(float);
+char __ovld __cnfn convert_char_rtp(float);
+char __ovld __cnfn convert_char_sat_rtp(float);
+char __ovld __cnfn convert_char_rtn(float);
+char __ovld __cnfn convert_char_sat_rtn(float);
+char __ovld __cnfn convert_char(float);
+char __ovld __cnfn convert_char_sat(float);
+uchar __ovld __cnfn convert_uchar_rte(char);
+uchar __ovld __cnfn convert_uchar_sat_rte(char);
+uchar __ovld __cnfn convert_uchar_rtz(char);
+uchar __ovld __cnfn convert_uchar_sat_rtz(char);
+uchar __ovld __cnfn convert_uchar_rtp(char);
+uchar __ovld __cnfn convert_uchar_sat_rtp(char);
+uchar __ovld __cnfn convert_uchar_rtn(char);
+uchar __ovld __cnfn convert_uchar_sat_rtn(char);
+uchar __ovld __cnfn convert_uchar(char);
+uchar __ovld __cnfn convert_uchar_sat(char);
+uchar __ovld __cnfn convert_uchar_rte(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rte(uchar);
+uchar __ovld __cnfn convert_uchar_rtz(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtz(uchar);
+uchar __ovld __cnfn convert_uchar_rtp(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtp(uchar);
+uchar __ovld __cnfn convert_uchar_rtn(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtn(uchar);
+uchar __ovld __cnfn convert_uchar(uchar);
+uchar __ovld __cnfn convert_uchar_sat(uchar);
+uchar __ovld __cnfn convert_uchar_rte(short);
+uchar __ovld __cnfn convert_uchar_sat_rte(short);
+uchar __ovld __cnfn convert_uchar_rtz(short);
+uchar __ovld __cnfn convert_uchar_sat_rtz(short);
+uchar __ovld __cnfn convert_uchar_rtp(short);
+uchar __ovld __cnfn convert_uchar_sat_rtp(short);
+uchar __ovld __cnfn convert_uchar_rtn(short);
+uchar __ovld __cnfn convert_uchar_sat_rtn(short);
+uchar __ovld __cnfn convert_uchar(short);
+uchar __ovld __cnfn convert_uchar_sat(short);
+uchar __ovld __cnfn convert_uchar_rte(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rte(ushort);
+uchar __ovld __cnfn convert_uchar_rtz(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtz(ushort);
+uchar __ovld __cnfn convert_uchar_rtp(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtp(ushort);
+uchar __ovld __cnfn convert_uchar_rtn(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtn(ushort);
+uchar __ovld __cnfn convert_uchar(ushort);
+uchar __ovld __cnfn convert_uchar_sat(ushort);
+uchar __ovld __cnfn convert_uchar_rte(int);
+uchar __ovld __cnfn convert_uchar_sat_rte(int);
+uchar __ovld __cnfn convert_uchar_rtz(int);
+uchar __ovld __cnfn convert_uchar_sat_rtz(int);
+uchar __ovld __cnfn convert_uchar_rtp(int);
+uchar __ovld __cnfn convert_uchar_sat_rtp(int);
+uchar __ovld __cnfn convert_uchar_rtn(int);
+uchar __ovld __cnfn convert_uchar_sat_rtn(int);
+uchar __ovld __cnfn convert_uchar(int);
+uchar __ovld __cnfn convert_uchar_sat(int);
+uchar __ovld __cnfn convert_uchar_rte(uint);
+uchar __ovld __cnfn convert_uchar_sat_rte(uint);
+uchar __ovld __cnfn convert_uchar_rtz(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtz(uint);
+uchar __ovld __cnfn convert_uchar_rtp(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtp(uint);
+uchar __ovld __cnfn convert_uchar_rtn(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtn(uint);
+uchar __ovld __cnfn convert_uchar(uint);
+uchar __ovld __cnfn convert_uchar_sat(uint);
+uchar __ovld __cnfn convert_uchar_rte(long);
+uchar __ovld __cnfn convert_uchar_sat_rte(long);
+uchar __ovld __cnfn convert_uchar_rtz(long);
+uchar __ovld __cnfn convert_uchar_sat_rtz(long);
+uchar __ovld __cnfn convert_uchar_rtp(long);
+uchar __ovld __cnfn convert_uchar_sat_rtp(long);
+uchar __ovld __cnfn convert_uchar_rtn(long);
+uchar __ovld __cnfn convert_uchar_sat_rtn(long);
+uchar __ovld __cnfn convert_uchar(long);
+uchar __ovld __cnfn convert_uchar_sat(long);
+uchar __ovld __cnfn convert_uchar_rte(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rte(ulong);
+uchar __ovld __cnfn convert_uchar_rtz(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtz(ulong);
+uchar __ovld __cnfn convert_uchar_rtp(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtp(ulong);
+uchar __ovld __cnfn convert_uchar_rtn(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtn(ulong);
+uchar __ovld __cnfn convert_uchar(ulong);
+uchar __ovld __cnfn convert_uchar_sat(ulong);
+uchar __ovld __cnfn convert_uchar_rte(float);
+uchar __ovld __cnfn convert_uchar_sat_rte(float);
+uchar __ovld __cnfn convert_uchar_rtz(float);
+uchar __ovld __cnfn convert_uchar_sat_rtz(float);
+uchar __ovld __cnfn convert_uchar_rtp(float);
+uchar __ovld __cnfn convert_uchar_sat_rtp(float);
+uchar __ovld __cnfn convert_uchar_rtn(float);
+uchar __ovld __cnfn convert_uchar_sat_rtn(float);
+uchar __ovld __cnfn convert_uchar(float);
+uchar __ovld __cnfn convert_uchar_sat(float);
+
+short __ovld __cnfn convert_short_rte(char);
+short __ovld __cnfn convert_short_sat_rte(char);
+short __ovld __cnfn convert_short_rtz(char);
+short __ovld __cnfn convert_short_sat_rtz(char);
+short __ovld __cnfn convert_short_rtp(char);
+short __ovld __cnfn convert_short_sat_rtp(char);
+short __ovld __cnfn convert_short_rtn(char);
+short __ovld __cnfn convert_short_sat_rtn(char);
+short __ovld __cnfn convert_short(char);
+short __ovld __cnfn convert_short_sat(char);
+short __ovld __cnfn convert_short_rte(uchar);
+short __ovld __cnfn convert_short_sat_rte(uchar);
+short __ovld __cnfn convert_short_rtz(uchar);
+short __ovld __cnfn convert_short_sat_rtz(uchar);
+short __ovld __cnfn convert_short_rtp(uchar);
+short __ovld __cnfn convert_short_sat_rtp(uchar);
+short __ovld __cnfn convert_short_rtn(uchar);
+short __ovld __cnfn convert_short_sat_rtn(uchar);
+short __ovld __cnfn convert_short(uchar);
+short __ovld __cnfn convert_short_sat(uchar);
+short __ovld __cnfn convert_short_rte(short);
+short __ovld __cnfn convert_short_sat_rte(short);
+short __ovld __cnfn convert_short_rtz(short);
+short __ovld __cnfn convert_short_sat_rtz(short);
+short __ovld __cnfn convert_short_rtp(short);
+short __ovld __cnfn convert_short_sat_rtp(short);
+short __ovld __cnfn convert_short_rtn(short);
+short __ovld __cnfn convert_short_sat_rtn(short);
+short __ovld __cnfn convert_short(short);
+short __ovld __cnfn convert_short_sat(short);
+short __ovld __cnfn convert_short_rte(ushort);
+short __ovld __cnfn convert_short_sat_rte(ushort);
+short __ovld __cnfn convert_short_rtz(ushort);
+short __ovld __cnfn convert_short_sat_rtz(ushort);
+short __ovld __cnfn convert_short_rtp(ushort);
+short __ovld __cnfn convert_short_sat_rtp(ushort);
+short __ovld __cnfn convert_short_rtn(ushort);
+short __ovld __cnfn convert_short_sat_rtn(ushort);
+short __ovld __cnfn convert_short(ushort);
+short __ovld __cnfn convert_short_sat(ushort);
+short __ovld __cnfn convert_short_rte(int);
+short __ovld __cnfn convert_short_sat_rte(int);
+short __ovld __cnfn convert_short_rtz(int);
+short __ovld __cnfn convert_short_sat_rtz(int);
+short __ovld __cnfn convert_short_rtp(int);
+short __ovld __cnfn convert_short_sat_rtp(int);
+short __ovld __cnfn convert_short_rtn(int);
+short __ovld __cnfn convert_short_sat_rtn(int);
+short __ovld __cnfn convert_short(int);
+short __ovld __cnfn convert_short_sat(int);
+short __ovld __cnfn convert_short_rte(uint);
+short __ovld __cnfn convert_short_sat_rte(uint);
+short __ovld __cnfn convert_short_rtz(uint);
+short __ovld __cnfn convert_short_sat_rtz(uint);
+short __ovld __cnfn convert_short_rtp(uint);
+short __ovld __cnfn convert_short_sat_rtp(uint);
+short __ovld __cnfn convert_short_rtn(uint);
+short __ovld __cnfn convert_short_sat_rtn(uint);
+short __ovld __cnfn convert_short(uint);
+short __ovld __cnfn convert_short_sat(uint);
+short __ovld __cnfn convert_short_rte(long);
+short __ovld __cnfn convert_short_sat_rte(long);
+short __ovld __cnfn convert_short_rtz(long);
+short __ovld __cnfn convert_short_sat_rtz(long);
+short __ovld __cnfn convert_short_rtp(long);
+short __ovld __cnfn convert_short_sat_rtp(long);
+short __ovld __cnfn convert_short_rtn(long);
+short __ovld __cnfn convert_short_sat_rtn(long);
+short __ovld __cnfn convert_short(long);
+short __ovld __cnfn convert_short_sat(long);
+short __ovld __cnfn convert_short_rte(ulong);
+short __ovld __cnfn convert_short_sat_rte(ulong);
+short __ovld __cnfn convert_short_rtz(ulong);
+short __ovld __cnfn convert_short_sat_rtz(ulong);
+short __ovld __cnfn convert_short_rtp(ulong);
+short __ovld __cnfn convert_short_sat_rtp(ulong);
+short __ovld __cnfn convert_short_rtn(ulong);
+short __ovld __cnfn convert_short_sat_rtn(ulong);
+short __ovld __cnfn convert_short(ulong);
+short __ovld __cnfn convert_short_sat(ulong);
+short __ovld __cnfn convert_short_rte(float);
+short __ovld __cnfn convert_short_sat_rte(float);
+short __ovld __cnfn convert_short_rtz(float);
+short __ovld __cnfn convert_short_sat_rtz(float);
+short __ovld __cnfn convert_short_rtp(float);
+short __ovld __cnfn convert_short_sat_rtp(float);
+short __ovld __cnfn convert_short_rtn(float);
+short __ovld __cnfn convert_short_sat_rtn(float);
+short __ovld __cnfn convert_short(float);
+short __ovld __cnfn convert_short_sat(float);
+ushort __ovld __cnfn convert_ushort_rte(char);
+ushort __ovld __cnfn convert_ushort_sat_rte(char);
+ushort __ovld __cnfn convert_ushort_rtz(char);
+ushort __ovld __cnfn convert_ushort_sat_rtz(char);
+ushort __ovld __cnfn convert_ushort_rtp(char);
+ushort __ovld __cnfn convert_ushort_sat_rtp(char);
+ushort __ovld __cnfn convert_ushort_rtn(char);
+ushort __ovld __cnfn convert_ushort_sat_rtn(char);
+ushort __ovld __cnfn convert_ushort(char);
+ushort __ovld __cnfn convert_ushort_sat(char);
+ushort __ovld __cnfn convert_ushort_rte(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rte(uchar);
+ushort __ovld __cnfn convert_ushort_rtz(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtz(uchar);
+ushort __ovld __cnfn convert_ushort_rtp(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtp(uchar);
+ushort __ovld __cnfn convert_ushort_rtn(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtn(uchar);
+ushort __ovld __cnfn convert_ushort(uchar);
+ushort __ovld __cnfn convert_ushort_sat(uchar);
+ushort __ovld __cnfn convert_ushort_rte(short);
+ushort __ovld __cnfn convert_ushort_sat_rte(short);
+ushort __ovld __cnfn convert_ushort_rtz(short);
+ushort __ovld __cnfn convert_ushort_sat_rtz(short);
+ushort __ovld __cnfn convert_ushort_rtp(short);
+ushort __ovld __cnfn convert_ushort_sat_rtp(short);
+ushort __ovld __cnfn convert_ushort_rtn(short);
+ushort __ovld __cnfn convert_ushort_sat_rtn(short);
+ushort __ovld __cnfn convert_ushort(short);
+ushort __ovld __cnfn convert_ushort_sat(short);
+ushort __ovld __cnfn convert_ushort_rte(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rte(ushort);
+ushort __ovld __cnfn convert_ushort_rtz(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtz(ushort);
+ushort __ovld __cnfn convert_ushort_rtp(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtp(ushort);
+ushort __ovld __cnfn convert_ushort_rtn(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtn(ushort);
+ushort __ovld __cnfn convert_ushort(ushort);
+ushort __ovld __cnfn convert_ushort_sat(ushort);
+ushort __ovld __cnfn convert_ushort_rte(int);
+ushort __ovld __cnfn convert_ushort_sat_rte(int);
+ushort __ovld __cnfn convert_ushort_rtz(int);
+ushort __ovld __cnfn convert_ushort_sat_rtz(int);
+ushort __ovld __cnfn convert_ushort_rtp(int);
+ushort __ovld __cnfn convert_ushort_sat_rtp(int);
+ushort __ovld __cnfn convert_ushort_rtn(int);
+ushort __ovld __cnfn convert_ushort_sat_rtn(int);
+ushort __ovld __cnfn convert_ushort(int);
+ushort __ovld __cnfn convert_ushort_sat(int);
+ushort __ovld __cnfn convert_ushort_rte(uint);
+ushort __ovld __cnfn convert_ushort_sat_rte(uint);
+ushort __ovld __cnfn convert_ushort_rtz(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtz(uint);
+ushort __ovld __cnfn convert_ushort_rtp(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtp(uint);
+ushort __ovld __cnfn convert_ushort_rtn(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtn(uint);
+ushort __ovld __cnfn convert_ushort(uint);
+ushort __ovld __cnfn convert_ushort_sat(uint);
+ushort __ovld __cnfn convert_ushort_rte(long);
+ushort __ovld __cnfn convert_ushort_sat_rte(long);
+ushort __ovld __cnfn convert_ushort_rtz(long);
+ushort __ovld __cnfn convert_ushort_sat_rtz(long);
+ushort __ovld __cnfn convert_ushort_rtp(long);
+ushort __ovld __cnfn convert_ushort_sat_rtp(long);
+ushort __ovld __cnfn convert_ushort_rtn(long);
+ushort __ovld __cnfn convert_ushort_sat_rtn(long);
+ushort __ovld __cnfn convert_ushort(long);
+ushort __ovld __cnfn convert_ushort_sat(long);
+ushort __ovld __cnfn convert_ushort_rte(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rte(ulong);
+ushort __ovld __cnfn convert_ushort_rtz(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtz(ulong);
+ushort __ovld __cnfn convert_ushort_rtp(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtp(ulong);
+ushort __ovld __cnfn convert_ushort_rtn(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtn(ulong);
+ushort __ovld __cnfn convert_ushort(ulong);
+ushort __ovld __cnfn convert_ushort_sat(ulong);
+ushort __ovld __cnfn convert_ushort_rte(float);
+ushort __ovld __cnfn convert_ushort_sat_rte(float);
+ushort __ovld __cnfn convert_ushort_rtz(float);
+ushort __ovld __cnfn convert_ushort_sat_rtz(float);
+ushort __ovld __cnfn convert_ushort_rtp(float);
+ushort __ovld __cnfn convert_ushort_sat_rtp(float);
+ushort __ovld __cnfn convert_ushort_rtn(float);
+ushort __ovld __cnfn convert_ushort_sat_rtn(float);
+ushort __ovld __cnfn convert_ushort(float);
+ushort __ovld __cnfn convert_ushort_sat(float);
+int __ovld __cnfn convert_int_rte(char);
+int __ovld __cnfn convert_int_sat_rte(char);
+int __ovld __cnfn convert_int_rtz(char);
+int __ovld __cnfn convert_int_sat_rtz(char);
+int __ovld __cnfn convert_int_rtp(char);
+int __ovld __cnfn convert_int_sat_rtp(char);
+int __ovld __cnfn convert_int_rtn(char);
+int __ovld __cnfn convert_int_sat_rtn(char);
+int __ovld __cnfn convert_int(char);
+int __ovld __cnfn convert_int_sat(char);
+int __ovld __cnfn convert_int_rte(uchar);
+int __ovld __cnfn convert_int_sat_rte(uchar);
+int __ovld __cnfn convert_int_rtz(uchar);
+int __ovld __cnfn convert_int_sat_rtz(uchar);
+int __ovld __cnfn convert_int_rtp(uchar);
+int __ovld __cnfn convert_int_sat_rtp(uchar);
+int __ovld __cnfn convert_int_rtn(uchar);
+int __ovld __cnfn convert_int_sat_rtn(uchar);
+int __ovld __cnfn convert_int(uchar);
+int __ovld __cnfn convert_int_sat(uchar);
+int __ovld __cnfn convert_int_rte(short);
+int __ovld __cnfn convert_int_sat_rte(short);
+int __ovld __cnfn convert_int_rtz(short);
+int __ovld __cnfn convert_int_sat_rtz(short);
+int __ovld __cnfn convert_int_rtp(short);
+int __ovld __cnfn convert_int_sat_rtp(short);
+int __ovld __cnfn convert_int_rtn(short);
+int __ovld __cnfn convert_int_sat_rtn(short);
+int __ovld __cnfn convert_int(short);
+int __ovld __cnfn convert_int_sat(short);
+int __ovld __cnfn convert_int_rte(ushort);
+int __ovld __cnfn convert_int_sat_rte(ushort);
+int __ovld __cnfn convert_int_rtz(ushort);
+int __ovld __cnfn convert_int_sat_rtz(ushort);
+int __ovld __cnfn convert_int_rtp(ushort);
+int __ovld __cnfn convert_int_sat_rtp(ushort);
+int __ovld __cnfn convert_int_rtn(ushort);
+int __ovld __cnfn convert_int_sat_rtn(ushort);
+int __ovld __cnfn convert_int(ushort);
+int __ovld __cnfn convert_int_sat(ushort);
+int __ovld __cnfn convert_int_rte(int);
+int __ovld __cnfn convert_int_sat_rte(int);
+int __ovld __cnfn convert_int_rtz(int);
+int __ovld __cnfn convert_int_sat_rtz(int);
+int __ovld __cnfn convert_int_rtp(int);
+int __ovld __cnfn convert_int_sat_rtp(int);
+int __ovld __cnfn convert_int_rtn(int);
+int __ovld __cnfn convert_int_sat_rtn(int);
+int __ovld __cnfn convert_int(int);
+int __ovld __cnfn convert_int_sat(int);
+int __ovld __cnfn convert_int_rte(uint);
+int __ovld __cnfn convert_int_sat_rte(uint);
+int __ovld __cnfn convert_int_rtz(uint);
+int __ovld __cnfn convert_int_sat_rtz(uint);
+int __ovld __cnfn convert_int_rtp(uint);
+int __ovld __cnfn convert_int_sat_rtp(uint);
+int __ovld __cnfn convert_int_rtn(uint);
+int __ovld __cnfn convert_int_sat_rtn(uint);
+int __ovld __cnfn convert_int(uint);
+int __ovld __cnfn convert_int_sat(uint);
+int __ovld __cnfn convert_int_rte(long);
+int __ovld __cnfn convert_int_sat_rte(long);
+int __ovld __cnfn convert_int_rtz(long);
+int __ovld __cnfn convert_int_sat_rtz(long);
+int __ovld __cnfn convert_int_rtp(long);
+int __ovld __cnfn convert_int_sat_rtp(long);
+int __ovld __cnfn convert_int_rtn(long);
+int __ovld __cnfn convert_int_sat_rtn(long);
+int __ovld __cnfn convert_int(long);
+int __ovld __cnfn convert_int_sat(long);
+int __ovld __cnfn convert_int_rte(ulong);
+int __ovld __cnfn convert_int_sat_rte(ulong);
+int __ovld __cnfn convert_int_rtz(ulong);
+int __ovld __cnfn convert_int_sat_rtz(ulong);
+int __ovld __cnfn convert_int_rtp(ulong);
+int __ovld __cnfn convert_int_sat_rtp(ulong);
+int __ovld __cnfn convert_int_rtn(ulong);
+int __ovld __cnfn convert_int_sat_rtn(ulong);
+int __ovld __cnfn convert_int(ulong);
+int __ovld __cnfn convert_int_sat(ulong);
+int __ovld __cnfn convert_int_rte(float);
+int __ovld __cnfn convert_int_sat_rte(float);
+int __ovld __cnfn convert_int_rtz(float);
+int __ovld __cnfn convert_int_sat_rtz(float);
+int __ovld __cnfn convert_int_rtp(float);
+int __ovld __cnfn convert_int_sat_rtp(float);
+int __ovld __cnfn convert_int_rtn(float);
+int __ovld __cnfn convert_int_sat_rtn(float);
+int __ovld __cnfn convert_int(float);
+int __ovld __cnfn convert_int_sat(float);
+uint __ovld __cnfn convert_uint_rte(char);
+uint __ovld __cnfn convert_uint_sat_rte(char);
+uint __ovld __cnfn convert_uint_rtz(char);
+uint __ovld __cnfn convert_uint_sat_rtz(char);
+uint __ovld __cnfn convert_uint_rtp(char);
+uint __ovld __cnfn convert_uint_sat_rtp(char);
+uint __ovld __cnfn convert_uint_rtn(char);
+uint __ovld __cnfn convert_uint_sat_rtn(char);
+uint __ovld __cnfn convert_uint(char);
+uint __ovld __cnfn convert_uint_sat(char);
+uint __ovld __cnfn convert_uint_rte(uchar);
+uint __ovld __cnfn convert_uint_sat_rte(uchar);
+uint __ovld __cnfn convert_uint_rtz(uchar);
+uint __ovld __cnfn convert_uint_sat_rtz(uchar);
+uint __ovld __cnfn convert_uint_rtp(uchar);
+uint __ovld __cnfn convert_uint_sat_rtp(uchar);
+uint __ovld __cnfn convert_uint_rtn(uchar);
+uint __ovld __cnfn convert_uint_sat_rtn(uchar);
+uint __ovld __cnfn convert_uint(uchar);
+uint __ovld __cnfn convert_uint_sat(uchar);
+uint __ovld __cnfn convert_uint_rte(short);
+uint __ovld __cnfn convert_uint_sat_rte(short);
+uint __ovld __cnfn convert_uint_rtz(short);
+uint __ovld __cnfn convert_uint_sat_rtz(short);
+uint __ovld __cnfn convert_uint_rtp(short);
+uint __ovld __cnfn convert_uint_sat_rtp(short);
+uint __ovld __cnfn convert_uint_rtn(short);
+uint __ovld __cnfn convert_uint_sat_rtn(short);
+uint __ovld __cnfn convert_uint(short);
+uint __ovld __cnfn convert_uint_sat(short);
+uint __ovld __cnfn convert_uint_rte(ushort);
+uint __ovld __cnfn convert_uint_sat_rte(ushort);
+uint __ovld __cnfn convert_uint_rtz(ushort);
+uint __ovld __cnfn convert_uint_sat_rtz(ushort);
+uint __ovld __cnfn convert_uint_rtp(ushort);
+uint __ovld __cnfn convert_uint_sat_rtp(ushort);
+uint __ovld __cnfn convert_uint_rtn(ushort);
+uint __ovld __cnfn convert_uint_sat_rtn(ushort);
+uint __ovld __cnfn convert_uint(ushort);
+uint __ovld __cnfn convert_uint_sat(ushort);
+uint __ovld __cnfn convert_uint_rte(int);
+uint __ovld __cnfn convert_uint_sat_rte(int);
+uint __ovld __cnfn convert_uint_rtz(int);
+uint __ovld __cnfn convert_uint_sat_rtz(int);
+uint __ovld __cnfn convert_uint_rtp(int);
+uint __ovld __cnfn convert_uint_sat_rtp(int);
+uint __ovld __cnfn convert_uint_rtn(int);
+uint __ovld __cnfn convert_uint_sat_rtn(int);
+uint __ovld __cnfn convert_uint(int);
+uint __ovld __cnfn convert_uint_sat(int);
+uint __ovld __cnfn convert_uint_rte(uint);
+uint __ovld __cnfn convert_uint_sat_rte(uint);
+uint __ovld __cnfn convert_uint_rtz(uint);
+uint __ovld __cnfn convert_uint_sat_rtz(uint);
+uint __ovld __cnfn convert_uint_rtp(uint);
+uint __ovld __cnfn convert_uint_sat_rtp(uint);
+uint __ovld __cnfn convert_uint_rtn(uint);
+uint __ovld __cnfn convert_uint_sat_rtn(uint);
+uint __ovld __cnfn convert_uint(uint);
+uint __ovld __cnfn convert_uint_sat(uint);
+uint __ovld __cnfn convert_uint_rte(long);
+uint __ovld __cnfn convert_uint_sat_rte(long);
+uint __ovld __cnfn convert_uint_rtz(long);
+uint __ovld __cnfn convert_uint_sat_rtz(long);
+uint __ovld __cnfn convert_uint_rtp(long);
+uint __ovld __cnfn convert_uint_sat_rtp(long);
+uint __ovld __cnfn convert_uint_rtn(long);
+uint __ovld __cnfn convert_uint_sat_rtn(long);
+uint __ovld __cnfn convert_uint(long);
+uint __ovld __cnfn convert_uint_sat(long);
+uint __ovld __cnfn convert_uint_rte(ulong);
+uint __ovld __cnfn convert_uint_sat_rte(ulong);
+uint __ovld __cnfn convert_uint_rtz(ulong);
+uint __ovld __cnfn convert_uint_sat_rtz(ulong);
+uint __ovld __cnfn convert_uint_rtp(ulong);
+uint __ovld __cnfn convert_uint_sat_rtp(ulong);
+uint __ovld __cnfn convert_uint_rtn(ulong);
+uint __ovld __cnfn convert_uint_sat_rtn(ulong);
+uint __ovld __cnfn convert_uint(ulong);
+uint __ovld __cnfn convert_uint_sat(ulong);
+uint __ovld __cnfn convert_uint_rte(float);
+uint __ovld __cnfn convert_uint_sat_rte(float);
+uint __ovld __cnfn convert_uint_rtz(float);
+uint __ovld __cnfn convert_uint_sat_rtz(float);
+uint __ovld __cnfn convert_uint_rtp(float);
+uint __ovld __cnfn convert_uint_sat_rtp(float);
+uint __ovld __cnfn convert_uint_rtn(float);
+uint __ovld __cnfn convert_uint_sat_rtn(float);
+uint __ovld __cnfn convert_uint(float);
+uint __ovld __cnfn convert_uint_sat(float);
+long __ovld __cnfn convert_long_rte(char);
+long __ovld __cnfn convert_long_sat_rte(char);
+long __ovld __cnfn convert_long_rtz(char);
+long __ovld __cnfn convert_long_sat_rtz(char);
+long __ovld __cnfn convert_long_rtp(char);
+long __ovld __cnfn convert_long_sat_rtp(char);
+long __ovld __cnfn convert_long_rtn(char);
+long __ovld __cnfn convert_long_sat_rtn(char);
+long __ovld __cnfn convert_long(char);
+long __ovld __cnfn convert_long_sat(char);
+long __ovld __cnfn convert_long_rte(uchar);
+long __ovld __cnfn convert_long_sat_rte(uchar);
+long __ovld __cnfn convert_long_rtz(uchar);
+long __ovld __cnfn convert_long_sat_rtz(uchar);
+long __ovld __cnfn convert_long_rtp(uchar);
+long __ovld __cnfn convert_long_sat_rtp(uchar);
+long __ovld __cnfn convert_long_rtn(uchar);
+long __ovld __cnfn convert_long_sat_rtn(uchar);
+long __ovld __cnfn convert_long(uchar);
+long __ovld __cnfn convert_long_sat(uchar);
+long __ovld __cnfn convert_long_rte(short);
+long __ovld __cnfn convert_long_sat_rte(short);
+long __ovld __cnfn convert_long_rtz(short);
+long __ovld __cnfn convert_long_sat_rtz(short);
+long __ovld __cnfn convert_long_rtp(short);
+long __ovld __cnfn convert_long_sat_rtp(short);
+long __ovld __cnfn convert_long_rtn(short);
+long __ovld __cnfn convert_long_sat_rtn(short);
+long __ovld __cnfn convert_long(short);
+long __ovld __cnfn convert_long_sat(short);
+long __ovld __cnfn convert_long_rte(ushort);
+long __ovld __cnfn convert_long_sat_rte(ushort);
+long __ovld __cnfn convert_long_rtz(ushort);
+long __ovld __cnfn convert_long_sat_rtz(ushort);
+long __ovld __cnfn convert_long_rtp(ushort);
+long __ovld __cnfn convert_long_sat_rtp(ushort);
+long __ovld __cnfn convert_long_rtn(ushort);
+long __ovld __cnfn convert_long_sat_rtn(ushort);
+long __ovld __cnfn convert_long(ushort);
+long __ovld __cnfn convert_long_sat(ushort);
+long __ovld __cnfn convert_long_rte(int);
+long __ovld __cnfn convert_long_sat_rte(int);
+long __ovld __cnfn convert_long_rtz(int);
+long __ovld __cnfn convert_long_sat_rtz(int);
+long __ovld __cnfn convert_long_rtp(int);
+long __ovld __cnfn convert_long_sat_rtp(int);
+long __ovld __cnfn convert_long_rtn(int);
+long __ovld __cnfn convert_long_sat_rtn(int);
+long __ovld __cnfn convert_long(int);
+long __ovld __cnfn convert_long_sat(int);
+long __ovld __cnfn convert_long_rte(uint);
+long __ovld __cnfn convert_long_sat_rte(uint);
+long __ovld __cnfn convert_long_rtz(uint);
+long __ovld __cnfn convert_long_sat_rtz(uint);
+long __ovld __cnfn convert_long_rtp(uint);
+long __ovld __cnfn convert_long_sat_rtp(uint);
+long __ovld __cnfn convert_long_rtn(uint);
+long __ovld __cnfn convert_long_sat_rtn(uint);
+long __ovld __cnfn convert_long(uint);
+long __ovld __cnfn convert_long_sat(uint);
+long __ovld __cnfn convert_long_rte(long);
+long __ovld __cnfn convert_long_sat_rte(long);
+long __ovld __cnfn convert_long_rtz(long);
+long __ovld __cnfn convert_long_sat_rtz(long);
+long __ovld __cnfn convert_long_rtp(long);
+long __ovld __cnfn convert_long_sat_rtp(long);
+long __ovld __cnfn convert_long_rtn(long);
+long __ovld __cnfn convert_long_sat_rtn(long);
+long __ovld __cnfn convert_long(long);
+long __ovld __cnfn convert_long_sat(long);
+long __ovld __cnfn convert_long_rte(ulong);
+long __ovld __cnfn convert_long_sat_rte(ulong);
+long __ovld __cnfn convert_long_rtz(ulong);
+long __ovld __cnfn convert_long_sat_rtz(ulong);
+long __ovld __cnfn convert_long_rtp(ulong);
+long __ovld __cnfn convert_long_sat_rtp(ulong);
+long __ovld __cnfn convert_long_rtn(ulong);
+long __ovld __cnfn convert_long_sat_rtn(ulong);
+long __ovld __cnfn convert_long(ulong);
+long __ovld __cnfn convert_long_sat(ulong);
+long __ovld __cnfn convert_long_rte(float);
+long __ovld __cnfn convert_long_sat_rte(float);
+long __ovld __cnfn convert_long_rtz(float);
+long __ovld __cnfn convert_long_sat_rtz(float);
+long __ovld __cnfn convert_long_rtp(float);
+long __ovld __cnfn convert_long_sat_rtp(float);
+long __ovld __cnfn convert_long_rtn(float);
+long __ovld __cnfn convert_long_sat_rtn(float);
+long __ovld __cnfn convert_long(float);
+long __ovld __cnfn convert_long_sat(float);
+ulong __ovld __cnfn convert_ulong_rte(char);
+ulong __ovld __cnfn convert_ulong_sat_rte(char);
+ulong __ovld __cnfn convert_ulong_rtz(char);
+ulong __ovld __cnfn convert_ulong_sat_rtz(char);
+ulong __ovld __cnfn convert_ulong_rtp(char);
+ulong __ovld __cnfn convert_ulong_sat_rtp(char);
+ulong __ovld __cnfn convert_ulong_rtn(char);
+ulong __ovld __cnfn convert_ulong_sat_rtn(char);
+ulong __ovld __cnfn convert_ulong(char);
+ulong __ovld __cnfn convert_ulong_sat(char);
+ulong __ovld __cnfn convert_ulong_rte(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rte(uchar);
+ulong __ovld __cnfn convert_ulong_rtz(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtz(uchar);
+ulong __ovld __cnfn convert_ulong_rtp(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtp(uchar);
+ulong __ovld __cnfn convert_ulong_rtn(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtn(uchar);
+ulong __ovld __cnfn convert_ulong(uchar);
+ulong __ovld __cnfn convert_ulong_sat(uchar);
+ulong __ovld __cnfn convert_ulong_rte(short);
+ulong __ovld __cnfn convert_ulong_sat_rte(short);
+ulong __ovld __cnfn convert_ulong_rtz(short);
+ulong __ovld __cnfn convert_ulong_sat_rtz(short);
+ulong __ovld __cnfn convert_ulong_rtp(short);
+ulong __ovld __cnfn convert_ulong_sat_rtp(short);
+ulong __ovld __cnfn convert_ulong_rtn(short);
+ulong __ovld __cnfn convert_ulong_sat_rtn(short);
+ulong __ovld __cnfn convert_ulong(short);
+ulong __ovld __cnfn convert_ulong_sat(short);
+ulong __ovld __cnfn convert_ulong_rte(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rte(ushort);
+ulong __ovld __cnfn convert_ulong_rtz(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtz(ushort);
+ulong __ovld __cnfn convert_ulong_rtp(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtp(ushort);
+ulong __ovld __cnfn convert_ulong_rtn(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtn(ushort);
+ulong __ovld __cnfn convert_ulong(ushort);
+ulong __ovld __cnfn convert_ulong_sat(ushort);
+ulong __ovld __cnfn convert_ulong_rte(int);
+ulong __ovld __cnfn convert_ulong_sat_rte(int);
+ulong __ovld __cnfn convert_ulong_rtz(int);
+ulong __ovld __cnfn convert_ulong_sat_rtz(int);
+ulong __ovld __cnfn convert_ulong_rtp(int);
+ulong __ovld __cnfn convert_ulong_sat_rtp(int);
+ulong __ovld __cnfn convert_ulong_rtn(int);
+ulong __ovld __cnfn convert_ulong_sat_rtn(int);
+ulong __ovld __cnfn convert_ulong(int);
+ulong __ovld __cnfn convert_ulong_sat(int);
+ulong __ovld __cnfn convert_ulong_rte(uint);
+ulong __ovld __cnfn convert_ulong_sat_rte(uint);
+ulong __ovld __cnfn convert_ulong_rtz(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtz(uint);
+ulong __ovld __cnfn convert_ulong_rtp(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtp(uint);
+ulong __ovld __cnfn convert_ulong_rtn(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtn(uint);
+ulong __ovld __cnfn convert_ulong(uint);
+ulong __ovld __cnfn convert_ulong_sat(uint);
+ulong __ovld __cnfn convert_ulong_rte(long);
+ulong __ovld __cnfn convert_ulong_sat_rte(long);
+ulong __ovld __cnfn convert_ulong_rtz(long);
+ulong __ovld __cnfn convert_ulong_sat_rtz(long);
+ulong __ovld __cnfn convert_ulong_rtp(long);
+ulong __ovld __cnfn convert_ulong_sat_rtp(long);
+ulong __ovld __cnfn convert_ulong_rtn(long);
+ulong __ovld __cnfn convert_ulong_sat_rtn(long);
+ulong __ovld __cnfn convert_ulong(long);
+ulong __ovld __cnfn convert_ulong_sat(long);
+ulong __ovld __cnfn convert_ulong_rte(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rte(ulong);
+ulong __ovld __cnfn convert_ulong_rtz(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtz(ulong);
+ulong __ovld __cnfn convert_ulong_rtp(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtp(ulong);
+ulong __ovld __cnfn convert_ulong_rtn(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtn(ulong);
+ulong __ovld __cnfn convert_ulong(ulong);
+ulong __ovld __cnfn convert_ulong_sat(ulong);
+ulong __ovld __cnfn convert_ulong_rte(float);
+ulong __ovld __cnfn convert_ulong_sat_rte(float);
+ulong __ovld __cnfn convert_ulong_rtz(float);
+ulong __ovld __cnfn convert_ulong_sat_rtz(float);
+ulong __ovld __cnfn convert_ulong_rtp(float);
+ulong __ovld __cnfn convert_ulong_sat_rtp(float);
+ulong __ovld __cnfn convert_ulong_rtn(float);
+ulong __ovld __cnfn convert_ulong_sat_rtn(float);
+ulong __ovld __cnfn convert_ulong(float);
+ulong __ovld __cnfn convert_ulong_sat(float);
+float __ovld __cnfn convert_float_rte(char);
+float __ovld __cnfn convert_float_rtz(char);
+float __ovld __cnfn convert_float_rtp(char);
+float __ovld __cnfn convert_float_rtn(char);
+float __ovld __cnfn convert_float(char);
+float __ovld __cnfn convert_float_rte(uchar);
+float __ovld __cnfn convert_float_rtz(uchar);
+float __ovld __cnfn convert_float_rtp(uchar);
+float __ovld __cnfn convert_float_rtn(uchar);
+float __ovld __cnfn convert_float(uchar);
+float __ovld __cnfn convert_float_rte(short);
+float __ovld __cnfn convert_float_rtz(short);
+float __ovld __cnfn convert_float_rtp(short);
+float __ovld __cnfn convert_float_rtn(short);
+float __ovld __cnfn convert_float(short);
+float __ovld __cnfn convert_float_rte(ushort);
+float __ovld __cnfn convert_float_rtz(ushort);
+float __ovld __cnfn convert_float_rtp(ushort);
+float __ovld __cnfn convert_float_rtn(ushort);
+float __ovld __cnfn convert_float(ushort);
+float __ovld __cnfn convert_float_rte(int);
+float __ovld __cnfn convert_float_rtz(int);
+float __ovld __cnfn convert_float_rtp(int);
+float __ovld __cnfn convert_float_rtn(int);
+float __ovld __cnfn convert_float(int);
+float __ovld __cnfn convert_float_rte(uint);
+float __ovld __cnfn convert_float_rtz(uint);
+float __ovld __cnfn convert_float_rtp(uint);
+float __ovld __cnfn convert_float_rtn(uint);
+float __ovld __cnfn convert_float(uint);
+float __ovld __cnfn convert_float_rte(long);
+float __ovld __cnfn convert_float_rtz(long);
+float __ovld __cnfn convert_float_rtp(long);
+float __ovld __cnfn convert_float_rtn(long);
+float __ovld __cnfn convert_float(long);
+float __ovld __cnfn convert_float_rte(ulong);
+float __ovld __cnfn convert_float_rtz(ulong);
+float __ovld __cnfn convert_float_rtp(ulong);
+float __ovld __cnfn convert_float_rtn(ulong);
+float __ovld __cnfn convert_float(ulong);
+float __ovld __cnfn convert_float_rte(float);
+float __ovld __cnfn convert_float_rtz(float);
+float __ovld __cnfn convert_float_rtp(float);
+float __ovld __cnfn convert_float_rtn(float);
+float __ovld __cnfn convert_float(float);
+char2 __ovld __cnfn convert_char2_rte(char2);
+char2 __ovld __cnfn convert_char2_sat_rte(char2);
+char2 __ovld __cnfn convert_char2_rtz(char2);
+char2 __ovld __cnfn convert_char2_sat_rtz(char2);
+char2 __ovld __cnfn convert_char2_rtp(char2);
+char2 __ovld __cnfn convert_char2_sat_rtp(char2);
+char2 __ovld __cnfn convert_char2_rtn(char2);
+char2 __ovld __cnfn convert_char2_sat_rtn(char2);
+char2 __ovld __cnfn convert_char2(char2);
+char2 __ovld __cnfn convert_char2_sat(char2);
+char2 __ovld __cnfn convert_char2_rte(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rte(uchar2);
+char2 __ovld __cnfn convert_char2_rtz(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtz(uchar2);
+char2 __ovld __cnfn convert_char2_rtp(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtp(uchar2);
+char2 __ovld __cnfn convert_char2_rtn(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtn(uchar2);
+char2 __ovld __cnfn convert_char2(uchar2);
+char2 __ovld __cnfn convert_char2_sat(uchar2);
+char2 __ovld __cnfn convert_char2_rte(short2);
+char2 __ovld __cnfn convert_char2_sat_rte(short2);
+char2 __ovld __cnfn convert_char2_rtz(short2);
+char2 __ovld __cnfn convert_char2_sat_rtz(short2);
+char2 __ovld __cnfn convert_char2_rtp(short2);
+char2 __ovld __cnfn convert_char2_sat_rtp(short2);
+char2 __ovld __cnfn convert_char2_rtn(short2);
+char2 __ovld __cnfn convert_char2_sat_rtn(short2);
+char2 __ovld __cnfn convert_char2(short2);
+char2 __ovld __cnfn convert_char2_sat(short2);
+char2 __ovld __cnfn convert_char2_rte(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rte(ushort2);
+char2 __ovld __cnfn convert_char2_rtz(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtz(ushort2);
+char2 __ovld __cnfn convert_char2_rtp(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtp(ushort2);
+char2 __ovld __cnfn convert_char2_rtn(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtn(ushort2);
+char2 __ovld __cnfn convert_char2(ushort2);
+char2 __ovld __cnfn convert_char2_sat(ushort2);
+char2 __ovld __cnfn convert_char2_rte(int2);
+char2 __ovld __cnfn convert_char2_sat_rte(int2);
+char2 __ovld __cnfn convert_char2_rtz(int2);
+char2 __ovld __cnfn convert_char2_sat_rtz(int2);
+char2 __ovld __cnfn convert_char2_rtp(int2);
+char2 __ovld __cnfn convert_char2_sat_rtp(int2);
+char2 __ovld __cnfn convert_char2_rtn(int2);
+char2 __ovld __cnfn convert_char2_sat_rtn(int2);
+char2 __ovld __cnfn convert_char2(int2);
+char2 __ovld __cnfn convert_char2_sat(int2);
+char2 __ovld __cnfn convert_char2_rte(uint2);
+char2 __ovld __cnfn convert_char2_sat_rte(uint2);
+char2 __ovld __cnfn convert_char2_rtz(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtz(uint2);
+char2 __ovld __cnfn convert_char2_rtp(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtp(uint2);
+char2 __ovld __cnfn convert_char2_rtn(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtn(uint2);
+char2 __ovld __cnfn convert_char2(uint2);
+char2 __ovld __cnfn convert_char2_sat(uint2);
+char2 __ovld __cnfn convert_char2_rte(long2);
+char2 __ovld __cnfn convert_char2_sat_rte(long2);
+char2 __ovld __cnfn convert_char2_rtz(long2);
+char2 __ovld __cnfn convert_char2_sat_rtz(long2);
+char2 __ovld __cnfn convert_char2_rtp(long2);
+char2 __ovld __cnfn convert_char2_sat_rtp(long2);
+char2 __ovld __cnfn convert_char2_rtn(long2);
+char2 __ovld __cnfn convert_char2_sat_rtn(long2);
+char2 __ovld __cnfn convert_char2(long2);
+char2 __ovld __cnfn convert_char2_sat(long2);
+char2 __ovld __cnfn convert_char2_rte(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rte(ulong2);
+char2 __ovld __cnfn convert_char2_rtz(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtz(ulong2);
+char2 __ovld __cnfn convert_char2_rtp(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtp(ulong2);
+char2 __ovld __cnfn convert_char2_rtn(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtn(ulong2);
+char2 __ovld __cnfn convert_char2(ulong2);
+char2 __ovld __cnfn convert_char2_sat(ulong2);
+char2 __ovld __cnfn convert_char2_rte(float2);
+char2 __ovld __cnfn convert_char2_sat_rte(float2);
+char2 __ovld __cnfn convert_char2_rtz(float2);
+char2 __ovld __cnfn convert_char2_sat_rtz(float2);
+char2 __ovld __cnfn convert_char2_rtp(float2);
+char2 __ovld __cnfn convert_char2_sat_rtp(float2);
+char2 __ovld __cnfn convert_char2_rtn(float2);
+char2 __ovld __cnfn convert_char2_sat_rtn(float2);
+char2 __ovld __cnfn convert_char2(float2);
+char2 __ovld __cnfn convert_char2_sat(float2);
+uchar2 __ovld __cnfn convert_uchar2_rte(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2);
+uchar2 __ovld __cnfn convert_uchar2(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat(char2);
+uchar2 __ovld __cnfn convert_uchar2_rte(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2);
+uchar2 __ovld __cnfn convert_uchar2(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rte(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2);
+uchar2 __ovld __cnfn convert_uchar2(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat(short2);
+uchar2 __ovld __cnfn convert_uchar2_rte(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2);
+uchar2 __ovld __cnfn convert_uchar2(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rte(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2);
+uchar2 __ovld __cnfn convert_uchar2(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat(int2);
+uchar2 __ovld __cnfn convert_uchar2_rte(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2);
+uchar2 __ovld __cnfn convert_uchar2(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rte(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2);
+uchar2 __ovld __cnfn convert_uchar2(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat(long2);
+uchar2 __ovld __cnfn convert_uchar2_rte(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2);
+uchar2 __ovld __cnfn convert_uchar2(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rte(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(float2);
+uchar2 __ovld __cnfn convert_uchar2(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat(float2);
+short2 __ovld __cnfn convert_short2_rte(char2);
+short2 __ovld __cnfn convert_short2_sat_rte(char2);
+short2 __ovld __cnfn convert_short2_rtz(char2);
+short2 __ovld __cnfn convert_short2_sat_rtz(char2);
+short2 __ovld __cnfn convert_short2_rtp(char2);
+short2 __ovld __cnfn convert_short2_sat_rtp(char2);
+short2 __ovld __cnfn convert_short2_rtn(char2);
+short2 __ovld __cnfn convert_short2_sat_rtn(char2);
+short2 __ovld __cnfn convert_short2(char2);
+short2 __ovld __cnfn convert_short2_sat(char2);
+short2 __ovld __cnfn convert_short2_rte(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rte(uchar2);
+short2 __ovld __cnfn convert_short2_rtz(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtz(uchar2);
+short2 __ovld __cnfn convert_short2_rtp(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtp(uchar2);
+short2 __ovld __cnfn convert_short2_rtn(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtn(uchar2);
+short2 __ovld __cnfn convert_short2(uchar2);
+short2 __ovld __cnfn convert_short2_sat(uchar2);
+short2 __ovld __cnfn convert_short2_rte(short2);
+short2 __ovld __cnfn convert_short2_sat_rte(short2);
+short2 __ovld __cnfn convert_short2_rtz(short2);
+short2 __ovld __cnfn convert_short2_sat_rtz(short2);
+short2 __ovld __cnfn convert_short2_rtp(short2);
+short2 __ovld __cnfn convert_short2_sat_rtp(short2);
+short2 __ovld __cnfn convert_short2_rtn(short2);
+short2 __ovld __cnfn convert_short2_sat_rtn(short2);
+short2 __ovld __cnfn convert_short2(short2);
+short2 __ovld __cnfn convert_short2_sat(short2);
+short2 __ovld __cnfn convert_short2_rte(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rte(ushort2);
+short2 __ovld __cnfn convert_short2_rtz(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtz(ushort2);
+short2 __ovld __cnfn convert_short2_rtp(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtp(ushort2);
+short2 __ovld __cnfn convert_short2_rtn(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtn(ushort2);
+short2 __ovld __cnfn convert_short2(ushort2);
+short2 __ovld __cnfn convert_short2_sat(ushort2);
+short2 __ovld __cnfn convert_short2_rte(int2);
+short2 __ovld __cnfn convert_short2_sat_rte(int2);
+short2 __ovld __cnfn convert_short2_rtz(int2);
+short2 __ovld __cnfn convert_short2_sat_rtz(int2);
+short2 __ovld __cnfn convert_short2_rtp(int2);
+short2 __ovld __cnfn convert_short2_sat_rtp(int2);
+short2 __ovld __cnfn convert_short2_rtn(int2);
+short2 __ovld __cnfn convert_short2_sat_rtn(int2);
+short2 __ovld __cnfn convert_short2(int2);
+short2 __ovld __cnfn convert_short2_sat(int2);
+short2 __ovld __cnfn convert_short2_rte(uint2);
+short2 __ovld __cnfn convert_short2_sat_rte(uint2);
+short2 __ovld __cnfn convert_short2_rtz(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtz(uint2);
+short2 __ovld __cnfn convert_short2_rtp(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtp(uint2);
+short2 __ovld __cnfn convert_short2_rtn(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtn(uint2);
+short2 __ovld __cnfn convert_short2(uint2);
+short2 __ovld __cnfn convert_short2_sat(uint2);
+short2 __ovld __cnfn convert_short2_rte(long2);
+short2 __ovld __cnfn convert_short2_sat_rte(long2);
+short2 __ovld __cnfn convert_short2_rtz(long2);
+short2 __ovld __cnfn convert_short2_sat_rtz(long2);
+short2 __ovld __cnfn convert_short2_rtp(long2);
+short2 __ovld __cnfn convert_short2_sat_rtp(long2);
+short2 __ovld __cnfn convert_short2_rtn(long2);
+short2 __ovld __cnfn convert_short2_sat_rtn(long2);
+short2 __ovld __cnfn convert_short2(long2);
+short2 __ovld __cnfn convert_short2_sat(long2);
+short2 __ovld __cnfn convert_short2_rte(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rte(ulong2);
+short2 __ovld __cnfn convert_short2_rtz(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtz(ulong2);
+short2 __ovld __cnfn convert_short2_rtp(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtp(ulong2);
+short2 __ovld __cnfn convert_short2_rtn(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtn(ulong2);
+short2 __ovld __cnfn convert_short2(ulong2);
+short2 __ovld __cnfn convert_short2_sat(ulong2);
+short2 __ovld __cnfn convert_short2_rte(float2);
+short2 __ovld __cnfn convert_short2_sat_rte(float2);
+short2 __ovld __cnfn convert_short2_rtz(float2);
+short2 __ovld __cnfn convert_short2_sat_rtz(float2);
+short2 __ovld __cnfn convert_short2_rtp(float2);
+short2 __ovld __cnfn convert_short2_sat_rtp(float2);
+short2 __ovld __cnfn convert_short2_rtn(float2);
+short2 __ovld __cnfn convert_short2_sat_rtn(float2);
+short2 __ovld __cnfn convert_short2(float2);
+short2 __ovld __cnfn convert_short2_sat(float2);
+ushort2 __ovld __cnfn convert_ushort2_rte(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2);
+ushort2 __ovld __cnfn convert_ushort2(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat(char2);
+ushort2 __ovld __cnfn convert_ushort2_rte(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uchar2);
+ushort2 __ovld __cnfn convert_ushort2(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rte(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2);
+ushort2 __ovld __cnfn convert_ushort2(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat(short2);
+ushort2 __ovld __cnfn convert_ushort2_rte(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2);
+ushort2 __ovld __cnfn convert_ushort2(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rte(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2);
+ushort2 __ovld __cnfn convert_ushort2(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat(int2);
+ushort2 __ovld __cnfn convert_ushort2_rte(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2);
+ushort2 __ovld __cnfn convert_ushort2(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rte(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2);
+ushort2 __ovld __cnfn convert_ushort2(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat(long2);
+ushort2 __ovld __cnfn convert_ushort2_rte(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2);
+ushort2 __ovld __cnfn convert_ushort2(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rte(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(float2);
+ushort2 __ovld __cnfn convert_ushort2(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat(float2);
+int2 __ovld __cnfn convert_int2_rte(char2);
+int2 __ovld __cnfn convert_int2_sat_rte(char2);
+int2 __ovld __cnfn convert_int2_rtz(char2);
+int2 __ovld __cnfn convert_int2_sat_rtz(char2);
+int2 __ovld __cnfn convert_int2_rtp(char2);
+int2 __ovld __cnfn convert_int2_sat_rtp(char2);
+int2 __ovld __cnfn convert_int2_rtn(char2);
+int2 __ovld __cnfn convert_int2_sat_rtn(char2);
+int2 __ovld __cnfn convert_int2(char2);
+int2 __ovld __cnfn convert_int2_sat(char2);
+int2 __ovld __cnfn convert_int2_rte(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rte(uchar2);
+int2 __ovld __cnfn convert_int2_rtz(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtz(uchar2);
+int2 __ovld __cnfn convert_int2_rtp(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtp(uchar2);
+int2 __ovld __cnfn convert_int2_rtn(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtn(uchar2);
+int2 __ovld __cnfn convert_int2(uchar2);
+int2 __ovld __cnfn convert_int2_sat(uchar2);
+int2 __ovld __cnfn convert_int2_rte(short2);
+int2 __ovld __cnfn convert_int2_sat_rte(short2);
+int2 __ovld __cnfn convert_int2_rtz(short2);
+int2 __ovld __cnfn convert_int2_sat_rtz(short2);
+int2 __ovld __cnfn convert_int2_rtp(short2);
+int2 __ovld __cnfn convert_int2_sat_rtp(short2);
+int2 __ovld __cnfn convert_int2_rtn(short2);
+int2 __ovld __cnfn convert_int2_sat_rtn(short2);
+int2 __ovld __cnfn convert_int2(short2);
+int2 __ovld __cnfn convert_int2_sat(short2);
+int2 __ovld __cnfn convert_int2_rte(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rte(ushort2);
+int2 __ovld __cnfn convert_int2_rtz(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtz(ushort2);
+int2 __ovld __cnfn convert_int2_rtp(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtp(ushort2);
+int2 __ovld __cnfn convert_int2_rtn(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtn(ushort2);
+int2 __ovld __cnfn convert_int2(ushort2);
+int2 __ovld __cnfn convert_int2_sat(ushort2);
+int2 __ovld __cnfn convert_int2_rte(int2);
+int2 __ovld __cnfn convert_int2_sat_rte(int2);
+int2 __ovld __cnfn convert_int2_rtz(int2);
+int2 __ovld __cnfn convert_int2_sat_rtz(int2);
+int2 __ovld __cnfn convert_int2_rtp(int2);
+int2 __ovld __cnfn convert_int2_sat_rtp(int2);
+int2 __ovld __cnfn convert_int2_rtn(int2);
+int2 __ovld __cnfn convert_int2_sat_rtn(int2);
+int2 __ovld __cnfn convert_int2(int2);
+int2 __ovld __cnfn convert_int2_sat(int2);
+int2 __ovld __cnfn convert_int2_rte(uint2);
+int2 __ovld __cnfn convert_int2_sat_rte(uint2);
+int2 __ovld __cnfn convert_int2_rtz(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtz(uint2);
+int2 __ovld __cnfn convert_int2_rtp(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtp(uint2);
+int2 __ovld __cnfn convert_int2_rtn(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtn(uint2);
+int2 __ovld __cnfn convert_int2(uint2);
+int2 __ovld __cnfn convert_int2_sat(uint2);
+int2 __ovld __cnfn convert_int2_rte(long2);
+int2 __ovld __cnfn convert_int2_sat_rte(long2);
+int2 __ovld __cnfn convert_int2_rtz(long2);
+int2 __ovld __cnfn convert_int2_sat_rtz(long2);
+int2 __ovld __cnfn convert_int2_rtp(long2);
+int2 __ovld __cnfn convert_int2_sat_rtp(long2);
+int2 __ovld __cnfn convert_int2_rtn(long2);
+int2 __ovld __cnfn convert_int2_sat_rtn(long2);
+int2 __ovld __cnfn convert_int2(long2);
+int2 __ovld __cnfn convert_int2_sat(long2);
+int2 __ovld __cnfn convert_int2_rte(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rte(ulong2);
+int2 __ovld __cnfn convert_int2_rtz(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtz(ulong2);
+int2 __ovld __cnfn convert_int2_rtp(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtp(ulong2);
+int2 __ovld __cnfn convert_int2_rtn(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtn(ulong2);
+int2 __ovld __cnfn convert_int2(ulong2);
+int2 __ovld __cnfn convert_int2_sat(ulong2);
+int2 __ovld __cnfn convert_int2_rte(float2);
+int2 __ovld __cnfn convert_int2_sat_rte(float2);
+int2 __ovld __cnfn convert_int2_rtz(float2);
+int2 __ovld __cnfn convert_int2_sat_rtz(float2);
+int2 __ovld __cnfn convert_int2_rtp(float2);
+int2 __ovld __cnfn convert_int2_sat_rtp(float2);
+int2 __ovld __cnfn convert_int2_rtn(float2);
+int2 __ovld __cnfn convert_int2_sat_rtn(float2);
+int2 __ovld __cnfn convert_int2(float2);
+int2 __ovld __cnfn convert_int2_sat(float2);
+uint2 __ovld __cnfn convert_uint2_rte(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(char2);
+uint2 __ovld __cnfn convert_uint2_rtz(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(char2);
+uint2 __ovld __cnfn convert_uint2_rtp(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(char2);
+uint2 __ovld __cnfn convert_uint2_rtn(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(char2);
+uint2 __ovld __cnfn convert_uint2(char2);
+uint2 __ovld __cnfn convert_uint2_sat(char2);
+uint2 __ovld __cnfn convert_uint2_rte(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtz(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtp(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtn(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2);
+uint2 __ovld __cnfn convert_uint2(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat(uchar2);
+uint2 __ovld __cnfn convert_uint2_rte(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(short2);
+uint2 __ovld __cnfn convert_uint2_rtz(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(short2);
+uint2 __ovld __cnfn convert_uint2_rtp(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(short2);
+uint2 __ovld __cnfn convert_uint2_rtn(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(short2);
+uint2 __ovld __cnfn convert_uint2(short2);
+uint2 __ovld __cnfn convert_uint2_sat(short2);
+uint2 __ovld __cnfn convert_uint2_rte(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtz(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtp(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtn(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2);
+uint2 __ovld __cnfn convert_uint2(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat(ushort2);
+uint2 __ovld __cnfn convert_uint2_rte(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(int2);
+uint2 __ovld __cnfn convert_uint2_rtz(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(int2);
+uint2 __ovld __cnfn convert_uint2_rtp(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(int2);
+uint2 __ovld __cnfn convert_uint2_rtn(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(int2);
+uint2 __ovld __cnfn convert_uint2(int2);
+uint2 __ovld __cnfn convert_uint2_sat(int2);
+uint2 __ovld __cnfn convert_uint2_rte(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(uint2);
+uint2 __ovld __cnfn convert_uint2_rtz(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2);
+uint2 __ovld __cnfn convert_uint2_rtp(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2);
+uint2 __ovld __cnfn convert_uint2_rtn(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2);
+uint2 __ovld __cnfn convert_uint2(uint2);
+uint2 __ovld __cnfn convert_uint2_sat(uint2);
+uint2 __ovld __cnfn convert_uint2_rte(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(long2);
+uint2 __ovld __cnfn convert_uint2_rtz(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(long2);
+uint2 __ovld __cnfn convert_uint2_rtp(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(long2);
+uint2 __ovld __cnfn convert_uint2_rtn(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(long2);
+uint2 __ovld __cnfn convert_uint2(long2);
+uint2 __ovld __cnfn convert_uint2_sat(long2);
+uint2 __ovld __cnfn convert_uint2_rte(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtz(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtp(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtn(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2);
+uint2 __ovld __cnfn convert_uint2(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat(ulong2);
+uint2 __ovld __cnfn convert_uint2_rte(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(float2);
+uint2 __ovld __cnfn convert_uint2_rtz(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(float2);
+uint2 __ovld __cnfn convert_uint2_rtp(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(float2);
+uint2 __ovld __cnfn convert_uint2_rtn(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(float2);
+uint2 __ovld __cnfn convert_uint2(float2);
+uint2 __ovld __cnfn convert_uint2_sat(float2);
+long2 __ovld __cnfn convert_long2_rte(char2);
+long2 __ovld __cnfn convert_long2_sat_rte(char2);
+long2 __ovld __cnfn convert_long2_rtz(char2);
+long2 __ovld __cnfn convert_long2_sat_rtz(char2);
+long2 __ovld __cnfn convert_long2_rtp(char2);
+long2 __ovld __cnfn convert_long2_sat_rtp(char2);
+long2 __ovld __cnfn convert_long2_rtn(char2);
+long2 __ovld __cnfn convert_long2_sat_rtn(char2);
+long2 __ovld __cnfn convert_long2(char2);
+long2 __ovld __cnfn convert_long2_sat(char2);
+long2 __ovld __cnfn convert_long2_rte(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rte(uchar2);
+long2 __ovld __cnfn convert_long2_rtz(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtz(uchar2);
+long2 __ovld __cnfn convert_long2_rtp(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtp(uchar2);
+long2 __ovld __cnfn convert_long2_rtn(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtn(uchar2);
+long2 __ovld __cnfn convert_long2(uchar2);
+long2 __ovld __cnfn convert_long2_sat(uchar2);
+long2 __ovld __cnfn convert_long2_rte(short2);
+long2 __ovld __cnfn convert_long2_sat_rte(short2);
+long2 __ovld __cnfn convert_long2_rtz(short2);
+long2 __ovld __cnfn convert_long2_sat_rtz(short2);
+long2 __ovld __cnfn convert_long2_rtp(short2);
+long2 __ovld __cnfn convert_long2_sat_rtp(short2);
+long2 __ovld __cnfn convert_long2_rtn(short2);
+long2 __ovld __cnfn convert_long2_sat_rtn(short2);
+long2 __ovld __cnfn convert_long2(short2);
+long2 __ovld __cnfn convert_long2_sat(short2);
+long2 __ovld __cnfn convert_long2_rte(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rte(ushort2);
+long2 __ovld __cnfn convert_long2_rtz(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtz(ushort2);
+long2 __ovld __cnfn convert_long2_rtp(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtp(ushort2);
+long2 __ovld __cnfn convert_long2_rtn(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtn(ushort2);
+long2 __ovld __cnfn convert_long2(ushort2);
+long2 __ovld __cnfn convert_long2_sat(ushort2);
+long2 __ovld __cnfn convert_long2_rte(int2);
+long2 __ovld __cnfn convert_long2_sat_rte(int2);
+long2 __ovld __cnfn convert_long2_rtz(int2);
+long2 __ovld __cnfn convert_long2_sat_rtz(int2);
+long2 __ovld __cnfn convert_long2_rtp(int2);
+long2 __ovld __cnfn convert_long2_sat_rtp(int2);
+long2 __ovld __cnfn convert_long2_rtn(int2);
+long2 __ovld __cnfn convert_long2_sat_rtn(int2);
+long2 __ovld __cnfn convert_long2(int2);
+long2 __ovld __cnfn convert_long2_sat(int2);
+long2 __ovld __cnfn convert_long2_rte(uint2);
+long2 __ovld __cnfn convert_long2_sat_rte(uint2);
+long2 __ovld __cnfn convert_long2_rtz(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtz(uint2);
+long2 __ovld __cnfn convert_long2_rtp(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtp(uint2);
+long2 __ovld __cnfn convert_long2_rtn(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtn(uint2);
+long2 __ovld __cnfn convert_long2(uint2);
+long2 __ovld __cnfn convert_long2_sat(uint2);
+long2 __ovld __cnfn convert_long2_rte(long2);
+long2 __ovld __cnfn convert_long2_sat_rte(long2);
+long2 __ovld __cnfn convert_long2_rtz(long2);
+long2 __ovld __cnfn convert_long2_sat_rtz(long2);
+long2 __ovld __cnfn convert_long2_rtp(long2);
+long2 __ovld __cnfn convert_long2_sat_rtp(long2);
+long2 __ovld __cnfn convert_long2_rtn(long2);
+long2 __ovld __cnfn convert_long2_sat_rtn(long2);
+long2 __ovld __cnfn convert_long2(long2);
+long2 __ovld __cnfn convert_long2_sat(long2);
+long2 __ovld __cnfn convert_long2_rte(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rte(ulong2);
+long2 __ovld __cnfn convert_long2_rtz(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtz(ulong2);
+long2 __ovld __cnfn convert_long2_rtp(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtp(ulong2);
+long2 __ovld __cnfn convert_long2_rtn(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtn(ulong2);
+long2 __ovld __cnfn convert_long2(ulong2);
+long2 __ovld __cnfn convert_long2_sat(ulong2);
+long2 __ovld __cnfn convert_long2_rte(float2);
+long2 __ovld __cnfn convert_long2_sat_rte(float2);
+long2 __ovld __cnfn convert_long2_rtz(float2);
+long2 __ovld __cnfn convert_long2_sat_rtz(float2);
+long2 __ovld __cnfn convert_long2_rtp(float2);
+long2 __ovld __cnfn convert_long2_sat_rtp(float2);
+long2 __ovld __cnfn convert_long2_rtn(float2);
+long2 __ovld __cnfn convert_long2_sat_rtn(float2);
+long2 __ovld __cnfn convert_long2(float2);
+long2 __ovld __cnfn convert_long2_sat(float2);
+ulong2 __ovld __cnfn convert_ulong2_rte(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2);
+ulong2 __ovld __cnfn convert_ulong2(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat(char2);
+ulong2 __ovld __cnfn convert_ulong2_rte(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2);
+ulong2 __ovld __cnfn convert_ulong2(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rte(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2);
+ulong2 __ovld __cnfn convert_ulong2(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat(short2);
+ulong2 __ovld __cnfn convert_ulong2_rte(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2);
+ulong2 __ovld __cnfn convert_ulong2(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rte(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2);
+ulong2 __ovld __cnfn convert_ulong2(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat(int2);
+ulong2 __ovld __cnfn convert_ulong2_rte(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2);
+ulong2 __ovld __cnfn convert_ulong2(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rte(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2);
+ulong2 __ovld __cnfn convert_ulong2(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat(long2);
+ulong2 __ovld __cnfn convert_ulong2_rte(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2);
+ulong2 __ovld __cnfn convert_ulong2(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rte(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2);
+ulong2 __ovld __cnfn convert_ulong2(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat(float2);
+float2 __ovld __cnfn convert_float2_rte(char2);
+float2 __ovld __cnfn convert_float2_rtz(char2);
+float2 __ovld __cnfn convert_float2_rtp(char2);
+float2 __ovld __cnfn convert_float2_rtn(char2);
+float2 __ovld __cnfn convert_float2(char2);
+float2 __ovld __cnfn convert_float2_rte(uchar2);
+float2 __ovld __cnfn convert_float2_rtz(uchar2);
+float2 __ovld __cnfn convert_float2_rtp(uchar2);
+float2 __ovld __cnfn convert_float2_rtn(uchar2);
+float2 __ovld __cnfn convert_float2(uchar2);
+float2 __ovld __cnfn convert_float2_rte(short2);
+float2 __ovld __cnfn convert_float2_rtz(short2);
+float2 __ovld __cnfn convert_float2_rtp(short2);
+float2 __ovld __cnfn convert_float2_rtn(short2);
+float2 __ovld __cnfn convert_float2(short2);
+float2 __ovld __cnfn convert_float2_rte(ushort2);
+float2 __ovld __cnfn convert_float2_rtz(ushort2);
+float2 __ovld __cnfn convert_float2_rtp(ushort2);
+float2 __ovld __cnfn convert_float2_rtn(ushort2);
+float2 __ovld __cnfn convert_float2(ushort2);
+float2 __ovld __cnfn convert_float2_rte(int2);
+float2 __ovld __cnfn convert_float2_rtz(int2);
+float2 __ovld __cnfn convert_float2_rtp(int2);
+float2 __ovld __cnfn convert_float2_rtn(int2);
+float2 __ovld __cnfn convert_float2(int2);
+float2 __ovld __cnfn convert_float2_rte(uint2);
+float2 __ovld __cnfn convert_float2_rtz(uint2);
+float2 __ovld __cnfn convert_float2_rtp(uint2);
+float2 __ovld __cnfn convert_float2_rtn(uint2);
+float2 __ovld __cnfn convert_float2(uint2);
+float2 __ovld __cnfn convert_float2_rte(long2);
+float2 __ovld __cnfn convert_float2_rtz(long2);
+float2 __ovld __cnfn convert_float2_rtp(long2);
+float2 __ovld __cnfn convert_float2_rtn(long2);
+float2 __ovld __cnfn convert_float2(long2);
+float2 __ovld __cnfn convert_float2_rte(ulong2);
+float2 __ovld __cnfn convert_float2_rtz(ulong2);
+float2 __ovld __cnfn convert_float2_rtp(ulong2);
+float2 __ovld __cnfn convert_float2_rtn(ulong2);
+float2 __ovld __cnfn convert_float2(ulong2);
+float2 __ovld __cnfn convert_float2_rte(float2);
+float2 __ovld __cnfn convert_float2_rtz(float2);
+float2 __ovld __cnfn convert_float2_rtp(float2);
+float2 __ovld __cnfn convert_float2_rtn(float2);
+float2 __ovld __cnfn convert_float2(float2);
+char3 __ovld __cnfn convert_char3_rte(char3);
+char3 __ovld __cnfn convert_char3_sat_rte(char3);
+char3 __ovld __cnfn convert_char3_rtz(char3);
+char3 __ovld __cnfn convert_char3_sat_rtz(char3);
+char3 __ovld __cnfn convert_char3_rtp(char3);
+char3 __ovld __cnfn convert_char3_sat_rtp(char3);
+char3 __ovld __cnfn convert_char3_rtn(char3);
+char3 __ovld __cnfn convert_char3_sat_rtn(char3);
+char3 __ovld __cnfn convert_char3(char3);
+char3 __ovld __cnfn convert_char3_sat(char3);
+char3 __ovld __cnfn convert_char3_rte(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rte(uchar3);
+char3 __ovld __cnfn convert_char3_rtz(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtz(uchar3);
+char3 __ovld __cnfn convert_char3_rtp(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtp(uchar3);
+char3 __ovld __cnfn convert_char3_rtn(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtn(uchar3);
+char3 __ovld __cnfn convert_char3(uchar3);
+char3 __ovld __cnfn convert_char3_sat(uchar3);
+char3 __ovld __cnfn convert_char3_rte(short3);
+char3 __ovld __cnfn convert_char3_sat_rte(short3);
+char3 __ovld __cnfn convert_char3_rtz(short3);
+char3 __ovld __cnfn convert_char3_sat_rtz(short3);
+char3 __ovld __cnfn convert_char3_rtp(short3);
+char3 __ovld __cnfn convert_char3_sat_rtp(short3);
+char3 __ovld __cnfn convert_char3_rtn(short3);
+char3 __ovld __cnfn convert_char3_sat_rtn(short3);
+char3 __ovld __cnfn convert_char3(short3);
+char3 __ovld __cnfn convert_char3_sat(short3);
+char3 __ovld __cnfn convert_char3_rte(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rte(ushort3);
+char3 __ovld __cnfn convert_char3_rtz(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtz(ushort3);
+char3 __ovld __cnfn convert_char3_rtp(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtp(ushort3);
+char3 __ovld __cnfn convert_char3_rtn(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtn(ushort3);
+char3 __ovld __cnfn convert_char3(ushort3);
+char3 __ovld __cnfn convert_char3_sat(ushort3);
+char3 __ovld __cnfn convert_char3_rte(int3);
+char3 __ovld __cnfn convert_char3_sat_rte(int3);
+char3 __ovld __cnfn convert_char3_rtz(int3);
+char3 __ovld __cnfn convert_char3_sat_rtz(int3);
+char3 __ovld __cnfn convert_char3_rtp(int3);
+char3 __ovld __cnfn convert_char3_sat_rtp(int3);
+char3 __ovld __cnfn convert_char3_rtn(int3);
+char3 __ovld __cnfn convert_char3_sat_rtn(int3);
+char3 __ovld __cnfn convert_char3(int3);
+char3 __ovld __cnfn convert_char3_sat(int3);
+char3 __ovld __cnfn convert_char3_rte(uint3);
+char3 __ovld __cnfn convert_char3_sat_rte(uint3);
+char3 __ovld __cnfn convert_char3_rtz(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtz(uint3);
+char3 __ovld __cnfn convert_char3_rtp(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtp(uint3);
+char3 __ovld __cnfn convert_char3_rtn(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtn(uint3);
+char3 __ovld __cnfn convert_char3(uint3);
+char3 __ovld __cnfn convert_char3_sat(uint3);
+char3 __ovld __cnfn convert_char3_rte(long3);
+char3 __ovld __cnfn convert_char3_sat_rte(long3);
+char3 __ovld __cnfn convert_char3_rtz(long3);
+char3 __ovld __cnfn convert_char3_sat_rtz(long3);
+char3 __ovld __cnfn convert_char3_rtp(long3);
+char3 __ovld __cnfn convert_char3_sat_rtp(long3);
+char3 __ovld __cnfn convert_char3_rtn(long3);
+char3 __ovld __cnfn convert_char3_sat_rtn(long3);
+char3 __ovld __cnfn convert_char3(long3);
+char3 __ovld __cnfn convert_char3_sat(long3);
+char3 __ovld __cnfn convert_char3_rte(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rte(ulong3);
+char3 __ovld __cnfn convert_char3_rtz(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtz(ulong3);
+char3 __ovld __cnfn convert_char3_rtp(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtp(ulong3);
+char3 __ovld __cnfn convert_char3_rtn(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtn(ulong3);
+char3 __ovld __cnfn convert_char3(ulong3);
+char3 __ovld __cnfn convert_char3_sat(ulong3);
+char3 __ovld __cnfn convert_char3_rte(float3);
+char3 __ovld __cnfn convert_char3_sat_rte(float3);
+char3 __ovld __cnfn convert_char3_rtz(float3);
+char3 __ovld __cnfn convert_char3_sat_rtz(float3);
+char3 __ovld __cnfn convert_char3_rtp(float3);
+char3 __ovld __cnfn convert_char3_sat_rtp(float3);
+char3 __ovld __cnfn convert_char3_rtn(float3);
+char3 __ovld __cnfn convert_char3_sat_rtn(float3);
+char3 __ovld __cnfn convert_char3(float3);
+char3 __ovld __cnfn convert_char3_sat(float3);
+uchar3 __ovld __cnfn convert_uchar3_rte(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3);
+uchar3 __ovld __cnfn convert_uchar3(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat(char3);
+uchar3 __ovld __cnfn convert_uchar3_rte(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3);
+uchar3 __ovld __cnfn convert_uchar3(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rte(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3);
+uchar3 __ovld __cnfn convert_uchar3(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat(short3);
+uchar3 __ovld __cnfn convert_uchar3_rte(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3);
+uchar3 __ovld __cnfn convert_uchar3(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rte(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3);
+uchar3 __ovld __cnfn convert_uchar3(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat(int3);
+uchar3 __ovld __cnfn convert_uchar3_rte(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3);
+uchar3 __ovld __cnfn convert_uchar3(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rte(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3);
+uchar3 __ovld __cnfn convert_uchar3(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat(long3);
+uchar3 __ovld __cnfn convert_uchar3_rte(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ulong3);
+uchar3 __ovld __cnfn convert_uchar3(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rte(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3);
+uchar3 __ovld __cnfn convert_uchar3(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat(float3);
+short3 __ovld __cnfn convert_short3_rte(char3);
+short3 __ovld __cnfn convert_short3_sat_rte(char3);
+short3 __ovld __cnfn convert_short3_rtz(char3);
+short3 __ovld __cnfn convert_short3_sat_rtz(char3);
+short3 __ovld __cnfn convert_short3_rtp(char3);
+short3 __ovld __cnfn convert_short3_sat_rtp(char3);
+short3 __ovld __cnfn convert_short3_rtn(char3);
+short3 __ovld __cnfn convert_short3_sat_rtn(char3);
+short3 __ovld __cnfn convert_short3(char3);
+short3 __ovld __cnfn convert_short3_sat(char3);
+short3 __ovld __cnfn convert_short3_rte(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rte(uchar3);
+short3 __ovld __cnfn convert_short3_rtz(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtz(uchar3);
+short3 __ovld __cnfn convert_short3_rtp(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtp(uchar3);
+short3 __ovld __cnfn convert_short3_rtn(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtn(uchar3);
+short3 __ovld __cnfn convert_short3(uchar3);
+short3 __ovld __cnfn convert_short3_sat(uchar3);
+short3 __ovld __cnfn convert_short3_rte(short3);
+short3 __ovld __cnfn convert_short3_sat_rte(short3);
+short3 __ovld __cnfn convert_short3_rtz(short3);
+short3 __ovld __cnfn convert_short3_sat_rtz(short3);
+short3 __ovld __cnfn convert_short3_rtp(short3);
+short3 __ovld __cnfn convert_short3_sat_rtp(short3);
+short3 __ovld __cnfn convert_short3_rtn(short3);
+short3 __ovld __cnfn convert_short3_sat_rtn(short3);
+short3 __ovld __cnfn convert_short3(short3);
+short3 __ovld __cnfn convert_short3_sat(short3);
+short3 __ovld __cnfn convert_short3_rte(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rte(ushort3);
+short3 __ovld __cnfn convert_short3_rtz(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtz(ushort3);
+short3 __ovld __cnfn convert_short3_rtp(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtp(ushort3);
+short3 __ovld __cnfn convert_short3_rtn(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtn(ushort3);
+short3 __ovld __cnfn convert_short3(ushort3);
+short3 __ovld __cnfn convert_short3_sat(ushort3);
+short3 __ovld __cnfn convert_short3_rte(int3);
+short3 __ovld __cnfn convert_short3_sat_rte(int3);
+short3 __ovld __cnfn convert_short3_rtz(int3);
+short3 __ovld __cnfn convert_short3_sat_rtz(int3);
+short3 __ovld __cnfn convert_short3_rtp(int3);
+short3 __ovld __cnfn convert_short3_sat_rtp(int3);
+short3 __ovld __cnfn convert_short3_rtn(int3);
+short3 __ovld __cnfn convert_short3_sat_rtn(int3);
+short3 __ovld __cnfn convert_short3(int3);
+short3 __ovld __cnfn convert_short3_sat(int3);
+short3 __ovld __cnfn convert_short3_rte(uint3);
+short3 __ovld __cnfn convert_short3_sat_rte(uint3);
+short3 __ovld __cnfn convert_short3_rtz(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtz(uint3);
+short3 __ovld __cnfn convert_short3_rtp(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtp(uint3);
+short3 __ovld __cnfn convert_short3_rtn(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtn(uint3);
+short3 __ovld __cnfn convert_short3(uint3);
+short3 __ovld __cnfn convert_short3_sat(uint3);
+short3 __ovld __cnfn convert_short3_rte(long3);
+short3 __ovld __cnfn convert_short3_sat_rte(long3);
+short3 __ovld __cnfn convert_short3_rtz(long3);
+short3 __ovld __cnfn convert_short3_sat_rtz(long3);
+short3 __ovld __cnfn convert_short3_rtp(long3);
+short3 __ovld __cnfn convert_short3_sat_rtp(long3);
+short3 __ovld __cnfn convert_short3_rtn(long3);
+short3 __ovld __cnfn convert_short3_sat_rtn(long3);
+short3 __ovld __cnfn convert_short3(long3);
+short3 __ovld __cnfn convert_short3_sat(long3);
+short3 __ovld __cnfn convert_short3_rte(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rte(ulong3);
+short3 __ovld __cnfn convert_short3_rtz(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtz(ulong3);
+short3 __ovld __cnfn convert_short3_rtp(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtp(ulong3);
+short3 __ovld __cnfn convert_short3_rtn(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtn(ulong3);
+short3 __ovld __cnfn convert_short3(ulong3);
+short3 __ovld __cnfn convert_short3_sat(ulong3);
+short3 __ovld __cnfn convert_short3_rte(float3);
+short3 __ovld __cnfn convert_short3_sat_rte(float3);
+short3 __ovld __cnfn convert_short3_rtz(float3);
+short3 __ovld __cnfn convert_short3_sat_rtz(float3);
+short3 __ovld __cnfn convert_short3_rtp(float3);
+short3 __ovld __cnfn convert_short3_sat_rtp(float3);
+short3 __ovld __cnfn convert_short3_rtn(float3);
+short3 __ovld __cnfn convert_short3_sat_rtn(float3);
+short3 __ovld __cnfn convert_short3(float3);
+short3 __ovld __cnfn convert_short3_sat(float3);
+ushort3 __ovld __cnfn convert_ushort3_rte(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3);
+ushort3 __ovld __cnfn convert_ushort3(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat(char3);
+ushort3 __ovld __cnfn convert_ushort3_rte(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3);
+ushort3 __ovld __cnfn convert_ushort3(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rte(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3);
+ushort3 __ovld __cnfn convert_ushort3(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat(short3);
+ushort3 __ovld __cnfn convert_ushort3_rte(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3);
+ushort3 __ovld __cnfn convert_ushort3(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rte(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3);
+ushort3 __ovld __cnfn convert_ushort3(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat(int3);
+ushort3 __ovld __cnfn convert_ushort3_rte(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3);
+ushort3 __ovld __cnfn convert_ushort3(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rte(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3);
+ushort3 __ovld __cnfn convert_ushort3(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat(long3);
+ushort3 __ovld __cnfn convert_ushort3_rte(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3);
+ushort3 __ovld __cnfn convert_ushort3(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rte(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3);
+ushort3 __ovld __cnfn convert_ushort3(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat(float3);
+int3 __ovld __cnfn convert_int3_rte(char3);
+int3 __ovld __cnfn convert_int3_sat_rte(char3);
+int3 __ovld __cnfn convert_int3_rtz(char3);
+int3 __ovld __cnfn convert_int3_sat_rtz(char3);
+int3 __ovld __cnfn convert_int3_rtp(char3);
+int3 __ovld __cnfn convert_int3_sat_rtp(char3);
+int3 __ovld __cnfn convert_int3_rtn(char3);
+int3 __ovld __cnfn convert_int3_sat_rtn(char3);
+int3 __ovld __cnfn convert_int3(char3);
+int3 __ovld __cnfn convert_int3_sat(char3);
+int3 __ovld __cnfn convert_int3_rte(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rte(uchar3);
+int3 __ovld __cnfn convert_int3_rtz(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtz(uchar3);
+int3 __ovld __cnfn convert_int3_rtp(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtp(uchar3);
+int3 __ovld __cnfn convert_int3_rtn(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtn(uchar3);
+int3 __ovld __cnfn convert_int3(uchar3);
+int3 __ovld __cnfn convert_int3_sat(uchar3);
+int3 __ovld __cnfn convert_int3_rte(short3);
+int3 __ovld __cnfn convert_int3_sat_rte(short3);
+int3 __ovld __cnfn convert_int3_rtz(short3);
+int3 __ovld __cnfn convert_int3_sat_rtz(short3);
+int3 __ovld __cnfn convert_int3_rtp(short3);
+int3 __ovld __cnfn convert_int3_sat_rtp(short3);
+int3 __ovld __cnfn convert_int3_rtn(short3);
+int3 __ovld __cnfn convert_int3_sat_rtn(short3);
+int3 __ovld __cnfn convert_int3(short3);
+int3 __ovld __cnfn convert_int3_sat(short3);
+int3 __ovld __cnfn convert_int3_rte(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rte(ushort3);
+int3 __ovld __cnfn convert_int3_rtz(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtz(ushort3);
+int3 __ovld __cnfn convert_int3_rtp(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtp(ushort3);
+int3 __ovld __cnfn convert_int3_rtn(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtn(ushort3);
+int3 __ovld __cnfn convert_int3(ushort3);
+int3 __ovld __cnfn convert_int3_sat(ushort3);
+int3 __ovld __cnfn convert_int3_rte(int3);
+int3 __ovld __cnfn convert_int3_sat_rte(int3);
+int3 __ovld __cnfn convert_int3_rtz(int3);
+int3 __ovld __cnfn convert_int3_sat_rtz(int3);
+int3 __ovld __cnfn convert_int3_rtp(int3);
+int3 __ovld __cnfn convert_int3_sat_rtp(int3);
+int3 __ovld __cnfn convert_int3_rtn(int3);
+int3 __ovld __cnfn convert_int3_sat_rtn(int3);
+int3 __ovld __cnfn convert_int3(int3);
+int3 __ovld __cnfn convert_int3_sat(int3);
+int3 __ovld __cnfn convert_int3_rte(uint3);
+int3 __ovld __cnfn convert_int3_sat_rte(uint3);
+int3 __ovld __cnfn convert_int3_rtz(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtz(uint3);
+int3 __ovld __cnfn convert_int3_rtp(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtp(uint3);
+int3 __ovld __cnfn convert_int3_rtn(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtn(uint3);
+int3 __ovld __cnfn convert_int3(uint3);
+int3 __ovld __cnfn convert_int3_sat(uint3);
+int3 __ovld __cnfn convert_int3_rte(long3);
+int3 __ovld __cnfn convert_int3_sat_rte(long3);
+int3 __ovld __cnfn convert_int3_rtz(long3);
+int3 __ovld __cnfn convert_int3_sat_rtz(long3);
+int3 __ovld __cnfn convert_int3_rtp(long3);
+int3 __ovld __cnfn convert_int3_sat_rtp(long3);
+int3 __ovld __cnfn convert_int3_rtn(long3);
+int3 __ovld __cnfn convert_int3_sat_rtn(long3);
+int3 __ovld __cnfn convert_int3(long3);
+int3 __ovld __cnfn convert_int3_sat(long3);
+int3 __ovld __cnfn convert_int3_rte(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rte(ulong3);
+int3 __ovld __cnfn convert_int3_rtz(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtz(ulong3);
+int3 __ovld __cnfn convert_int3_rtp(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtp(ulong3);
+int3 __ovld __cnfn convert_int3_rtn(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtn(ulong3);
+int3 __ovld __cnfn convert_int3(ulong3);
+int3 __ovld __cnfn convert_int3_sat(ulong3);
+int3 __ovld __cnfn convert_int3_rte(float3);
+int3 __ovld __cnfn convert_int3_sat_rte(float3);
+int3 __ovld __cnfn convert_int3_rtz(float3);
+int3 __ovld __cnfn convert_int3_sat_rtz(float3);
+int3 __ovld __cnfn convert_int3_rtp(float3);
+int3 __ovld __cnfn convert_int3_sat_rtp(float3);
+int3 __ovld __cnfn convert_int3_rtn(float3);
+int3 __ovld __cnfn convert_int3_sat_rtn(float3);
+int3 __ovld __cnfn convert_int3(float3);
+int3 __ovld __cnfn convert_int3_sat(float3);
+uint3 __ovld __cnfn convert_uint3_rte(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(char3);
+uint3 __ovld __cnfn convert_uint3_rtz(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(char3);
+uint3 __ovld __cnfn convert_uint3_rtp(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(char3);
+uint3 __ovld __cnfn convert_uint3_rtn(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(char3);
+uint3 __ovld __cnfn convert_uint3(char3);
+uint3 __ovld __cnfn convert_uint3_sat(char3);
+uint3 __ovld __cnfn convert_uint3_rte(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtz(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtp(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtn(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(uchar3);
+uint3 __ovld __cnfn convert_uint3(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat(uchar3);
+uint3 __ovld __cnfn convert_uint3_rte(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(short3);
+uint3 __ovld __cnfn convert_uint3_rtz(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(short3);
+uint3 __ovld __cnfn convert_uint3_rtp(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(short3);
+uint3 __ovld __cnfn convert_uint3_rtn(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(short3);
+uint3 __ovld __cnfn convert_uint3(short3);
+uint3 __ovld __cnfn convert_uint3_sat(short3);
+uint3 __ovld __cnfn convert_uint3_rte(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtz(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtp(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtn(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3);
+uint3 __ovld __cnfn convert_uint3(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat(ushort3);
+uint3 __ovld __cnfn convert_uint3_rte(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(int3);
+uint3 __ovld __cnfn convert_uint3_rtz(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(int3);
+uint3 __ovld __cnfn convert_uint3_rtp(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(int3);
+uint3 __ovld __cnfn convert_uint3_rtn(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(int3);
+uint3 __ovld __cnfn convert_uint3(int3);
+uint3 __ovld __cnfn convert_uint3_sat(int3);
+uint3 __ovld __cnfn convert_uint3_rte(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(uint3);
+uint3 __ovld __cnfn convert_uint3_rtz(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3);
+uint3 __ovld __cnfn convert_uint3_rtp(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3);
+uint3 __ovld __cnfn convert_uint3_rtn(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3);
+uint3 __ovld __cnfn convert_uint3(uint3);
+uint3 __ovld __cnfn convert_uint3_sat(uint3);
+uint3 __ovld __cnfn convert_uint3_rte(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(long3);
+uint3 __ovld __cnfn convert_uint3_rtz(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(long3);
+uint3 __ovld __cnfn convert_uint3_rtp(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(long3);
+uint3 __ovld __cnfn convert_uint3_rtn(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(long3);
+uint3 __ovld __cnfn convert_uint3(long3);
+uint3 __ovld __cnfn convert_uint3_sat(long3);
+uint3 __ovld __cnfn convert_uint3_rte(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtz(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtp(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtn(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3);
+uint3 __ovld __cnfn convert_uint3(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat(ulong3);
+uint3 __ovld __cnfn convert_uint3_rte(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(float3);
+uint3 __ovld __cnfn convert_uint3_rtz(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(float3);
+uint3 __ovld __cnfn convert_uint3_rtp(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(float3);
+uint3 __ovld __cnfn convert_uint3_rtn(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(float3);
+uint3 __ovld __cnfn convert_uint3(float3);
+uint3 __ovld __cnfn convert_uint3_sat(float3);
+long3 __ovld __cnfn convert_long3_rte(char3);
+long3 __ovld __cnfn convert_long3_sat_rte(char3);
+long3 __ovld __cnfn convert_long3_rtz(char3);
+long3 __ovld __cnfn convert_long3_sat_rtz(char3);
+long3 __ovld __cnfn convert_long3_rtp(char3);
+long3 __ovld __cnfn convert_long3_sat_rtp(char3);
+long3 __ovld __cnfn convert_long3_rtn(char3);
+long3 __ovld __cnfn convert_long3_sat_rtn(char3);
+long3 __ovld __cnfn convert_long3(char3);
+long3 __ovld __cnfn convert_long3_sat(char3);
+long3 __ovld __cnfn convert_long3_rte(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rte(uchar3);
+long3 __ovld __cnfn convert_long3_rtz(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtz(uchar3);
+long3 __ovld __cnfn convert_long3_rtp(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtp(uchar3);
+long3 __ovld __cnfn convert_long3_rtn(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtn(uchar3);
+long3 __ovld __cnfn convert_long3(uchar3);
+long3 __ovld __cnfn convert_long3_sat(uchar3);
+long3 __ovld __cnfn convert_long3_rte(short3);
+long3 __ovld __cnfn convert_long3_sat_rte(short3);
+long3 __ovld __cnfn convert_long3_rtz(short3);
+long3 __ovld __cnfn convert_long3_sat_rtz(short3);
+long3 __ovld __cnfn convert_long3_rtp(short3);
+long3 __ovld __cnfn convert_long3_sat_rtp(short3);
+long3 __ovld __cnfn convert_long3_rtn(short3);
+long3 __ovld __cnfn convert_long3_sat_rtn(short3);
+long3 __ovld __cnfn convert_long3(short3);
+long3 __ovld __cnfn convert_long3_sat(short3);
+long3 __ovld __cnfn convert_long3_rte(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rte(ushort3);
+long3 __ovld __cnfn convert_long3_rtz(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtz(ushort3);
+long3 __ovld __cnfn convert_long3_rtp(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtp(ushort3);
+long3 __ovld __cnfn convert_long3_rtn(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtn(ushort3);
+long3 __ovld __cnfn convert_long3(ushort3);
+long3 __ovld __cnfn convert_long3_sat(ushort3);
+long3 __ovld __cnfn convert_long3_rte(int3);
+long3 __ovld __cnfn convert_long3_sat_rte(int3);
+long3 __ovld __cnfn convert_long3_rtz(int3);
+long3 __ovld __cnfn convert_long3_sat_rtz(int3);
+long3 __ovld __cnfn convert_long3_rtp(int3);
+long3 __ovld __cnfn convert_long3_sat_rtp(int3);
+long3 __ovld __cnfn convert_long3_rtn(int3);
+long3 __ovld __cnfn convert_long3_sat_rtn(int3);
+long3 __ovld __cnfn convert_long3(int3);
+long3 __ovld __cnfn convert_long3_sat(int3);
+long3 __ovld __cnfn convert_long3_rte(uint3);
+long3 __ovld __cnfn convert_long3_sat_rte(uint3);
+long3 __ovld __cnfn convert_long3_rtz(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtz(uint3);
+long3 __ovld __cnfn convert_long3_rtp(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtp(uint3);
+long3 __ovld __cnfn convert_long3_rtn(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtn(uint3);
+long3 __ovld __cnfn convert_long3(uint3);
+long3 __ovld __cnfn convert_long3_sat(uint3);
+long3 __ovld __cnfn convert_long3_rte(long3);
+long3 __ovld __cnfn convert_long3_sat_rte(long3);
+long3 __ovld __cnfn convert_long3_rtz(long3);
+long3 __ovld __cnfn convert_long3_sat_rtz(long3);
+long3 __ovld __cnfn convert_long3_rtp(long3);
+long3 __ovld __cnfn convert_long3_sat_rtp(long3);
+long3 __ovld __cnfn convert_long3_rtn(long3);
+long3 __ovld __cnfn convert_long3_sat_rtn(long3);
+long3 __ovld __cnfn convert_long3(long3);
+long3 __ovld __cnfn convert_long3_sat(long3);
+long3 __ovld __cnfn convert_long3_rte(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rte(ulong3);
+long3 __ovld __cnfn convert_long3_rtz(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtz(ulong3);
+long3 __ovld __cnfn convert_long3_rtp(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtp(ulong3);
+long3 __ovld __cnfn convert_long3_rtn(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtn(ulong3);
+long3 __ovld __cnfn convert_long3(ulong3);
+long3 __ovld __cnfn convert_long3_sat(ulong3);
+long3 __ovld __cnfn convert_long3_rte(float3);
+long3 __ovld __cnfn convert_long3_sat_rte(float3);
+long3 __ovld __cnfn convert_long3_rtz(float3);
+long3 __ovld __cnfn convert_long3_sat_rtz(float3);
+long3 __ovld __cnfn convert_long3_rtp(float3);
+long3 __ovld __cnfn convert_long3_sat_rtp(float3);
+long3 __ovld __cnfn convert_long3_rtn(float3);
+long3 __ovld __cnfn convert_long3_sat_rtn(float3);
+long3 __ovld __cnfn convert_long3(float3);
+long3 __ovld __cnfn convert_long3_sat(float3);
+ulong3 __ovld __cnfn convert_ulong3_rte(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3);
+ulong3 __ovld __cnfn convert_ulong3(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat(char3);
+ulong3 __ovld __cnfn convert_ulong3_rte(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3);
+ulong3 __ovld __cnfn convert_ulong3(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rte(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3);
+ulong3 __ovld __cnfn convert_ulong3(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat(short3);
+ulong3 __ovld __cnfn convert_ulong3_rte(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3);
+ulong3 __ovld __cnfn convert_ulong3(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rte(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3);
+ulong3 __ovld __cnfn convert_ulong3(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat(int3);
+ulong3 __ovld __cnfn convert_ulong3_rte(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3);
+ulong3 __ovld __cnfn convert_ulong3(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rte(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3);
+ulong3 __ovld __cnfn convert_ulong3(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat(long3);
+ulong3 __ovld __cnfn convert_ulong3_rte(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3);
+ulong3 __ovld __cnfn convert_ulong3(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rte(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3);
+ulong3 __ovld __cnfn convert_ulong3(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat(float3);
+float3 __ovld __cnfn convert_float3_rte(char3);
+float3 __ovld __cnfn convert_float3_rtz(char3);
+float3 __ovld __cnfn convert_float3_rtp(char3);
+float3 __ovld __cnfn convert_float3_rtn(char3);
+float3 __ovld __cnfn convert_float3(char3);
+float3 __ovld __cnfn convert_float3_rte(uchar3);
+float3 __ovld __cnfn convert_float3_rtz(uchar3);
+float3 __ovld __cnfn convert_float3_rtp(uchar3);
+float3 __ovld __cnfn convert_float3_rtn(uchar3);
+float3 __ovld __cnfn convert_float3(uchar3);
+float3 __ovld __cnfn convert_float3_rte(short3);
+float3 __ovld __cnfn convert_float3_rtz(short3);
+float3 __ovld __cnfn convert_float3_rtp(short3);
+float3 __ovld __cnfn convert_float3_rtn(short3);
+float3 __ovld __cnfn convert_float3(short3);
+float3 __ovld __cnfn convert_float3_rte(ushort3);
+float3 __ovld __cnfn convert_float3_rtz(ushort3);
+float3 __ovld __cnfn convert_float3_rtp(ushort3);
+float3 __ovld __cnfn convert_float3_rtn(ushort3);
+float3 __ovld __cnfn convert_float3(ushort3);
+float3 __ovld __cnfn convert_float3_rte(int3);
+float3 __ovld __cnfn convert_float3_rtz(int3);
+float3 __ovld __cnfn convert_float3_rtp(int3);
+float3 __ovld __cnfn convert_float3_rtn(int3);
+float3 __ovld __cnfn convert_float3(int3);
+float3 __ovld __cnfn convert_float3_rte(uint3);
+float3 __ovld __cnfn convert_float3_rtz(uint3);
+float3 __ovld __cnfn convert_float3_rtp(uint3);
+float3 __ovld __cnfn convert_float3_rtn(uint3);
+float3 __ovld __cnfn convert_float3(uint3);
+float3 __ovld __cnfn convert_float3_rte(long3);
+float3 __ovld __cnfn convert_float3_rtz(long3);
+float3 __ovld __cnfn convert_float3_rtp(long3);
+float3 __ovld __cnfn convert_float3_rtn(long3);
+float3 __ovld __cnfn convert_float3(long3);
+float3 __ovld __cnfn convert_float3_rte(ulong3);
+float3 __ovld __cnfn convert_float3_rtz(ulong3);
+float3 __ovld __cnfn convert_float3_rtp(ulong3);
+float3 __ovld __cnfn convert_float3_rtn(ulong3);
+float3 __ovld __cnfn convert_float3(ulong3);
+float3 __ovld __cnfn convert_float3_rte(float3);
+float3 __ovld __cnfn convert_float3_rtz(float3);
+float3 __ovld __cnfn convert_float3_rtp(float3);
+float3 __ovld __cnfn convert_float3_rtn(float3);
+float3 __ovld __cnfn convert_float3(float3);
+char4 __ovld __cnfn convert_char4_rte(char4);
+char4 __ovld __cnfn convert_char4_sat_rte(char4);
+char4 __ovld __cnfn convert_char4_rtz(char4);
+char4 __ovld __cnfn convert_char4_sat_rtz(char4);
+char4 __ovld __cnfn convert_char4_rtp(char4);
+char4 __ovld __cnfn convert_char4_sat_rtp(char4);
+char4 __ovld __cnfn convert_char4_rtn(char4);
+char4 __ovld __cnfn convert_char4_sat_rtn(char4);
+char4 __ovld __cnfn convert_char4(char4);
+char4 __ovld __cnfn convert_char4_sat(char4);
+char4 __ovld __cnfn convert_char4_rte(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rte(uchar4);
+char4 __ovld __cnfn convert_char4_rtz(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtz(uchar4);
+char4 __ovld __cnfn convert_char4_rtp(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtp(uchar4);
+char4 __ovld __cnfn convert_char4_rtn(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtn(uchar4);
+char4 __ovld __cnfn convert_char4(uchar4);
+char4 __ovld __cnfn convert_char4_sat(uchar4);
+char4 __ovld __cnfn convert_char4_rte(short4);
+char4 __ovld __cnfn convert_char4_sat_rte(short4);
+char4 __ovld __cnfn convert_char4_rtz(short4);
+char4 __ovld __cnfn convert_char4_sat_rtz(short4);
+char4 __ovld __cnfn convert_char4_rtp(short4);
+char4 __ovld __cnfn convert_char4_sat_rtp(short4);
+char4 __ovld __cnfn convert_char4_rtn(short4);
+char4 __ovld __cnfn convert_char4_sat_rtn(short4);
+char4 __ovld __cnfn convert_char4(short4);
+char4 __ovld __cnfn convert_char4_sat(short4);
+char4 __ovld __cnfn convert_char4_rte(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rte(ushort4);
+char4 __ovld __cnfn convert_char4_rtz(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtz(ushort4);
+char4 __ovld __cnfn convert_char4_rtp(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtp(ushort4);
+char4 __ovld __cnfn convert_char4_rtn(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtn(ushort4);
+char4 __ovld __cnfn convert_char4(ushort4);
+char4 __ovld __cnfn convert_char4_sat(ushort4);
+char4 __ovld __cnfn convert_char4_rte(int4);
+char4 __ovld __cnfn convert_char4_sat_rte(int4);
+char4 __ovld __cnfn convert_char4_rtz(int4);
+char4 __ovld __cnfn convert_char4_sat_rtz(int4);
+char4 __ovld __cnfn convert_char4_rtp(int4);
+char4 __ovld __cnfn convert_char4_sat_rtp(int4);
+char4 __ovld __cnfn convert_char4_rtn(int4);
+char4 __ovld __cnfn convert_char4_sat_rtn(int4);
+char4 __ovld __cnfn convert_char4(int4);
+char4 __ovld __cnfn convert_char4_sat(int4);
+char4 __ovld __cnfn convert_char4_rte(uint4);
+char4 __ovld __cnfn convert_char4_sat_rte(uint4);
+char4 __ovld __cnfn convert_char4_rtz(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtz(uint4);
+char4 __ovld __cnfn convert_char4_rtp(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtp(uint4);
+char4 __ovld __cnfn convert_char4_rtn(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtn(uint4);
+char4 __ovld __cnfn convert_char4(uint4);
+char4 __ovld __cnfn convert_char4_sat(uint4);
+char4 __ovld __cnfn convert_char4_rte(long4);
+char4 __ovld __cnfn convert_char4_sat_rte(long4);
+char4 __ovld __cnfn convert_char4_rtz(long4);
+char4 __ovld __cnfn convert_char4_sat_rtz(long4);
+char4 __ovld __cnfn convert_char4_rtp(long4);
+char4 __ovld __cnfn convert_char4_sat_rtp(long4);
+char4 __ovld __cnfn convert_char4_rtn(long4);
+char4 __ovld __cnfn convert_char4_sat_rtn(long4);
+char4 __ovld __cnfn convert_char4(long4);
+char4 __ovld __cnfn convert_char4_sat(long4);
+char4 __ovld __cnfn convert_char4_rte(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rte(ulong4);
+char4 __ovld __cnfn convert_char4_rtz(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtz(ulong4);
+char4 __ovld __cnfn convert_char4_rtp(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtp(ulong4);
+char4 __ovld __cnfn convert_char4_rtn(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtn(ulong4);
+char4 __ovld __cnfn convert_char4(ulong4);
+char4 __ovld __cnfn convert_char4_sat(ulong4);
+char4 __ovld __cnfn convert_char4_rte(float4);
+char4 __ovld __cnfn convert_char4_sat_rte(float4);
+char4 __ovld __cnfn convert_char4_rtz(float4);
+char4 __ovld __cnfn convert_char4_sat_rtz(float4);
+char4 __ovld __cnfn convert_char4_rtp(float4);
+char4 __ovld __cnfn convert_char4_sat_rtp(float4);
+char4 __ovld __cnfn convert_char4_rtn(float4);
+char4 __ovld __cnfn convert_char4_sat_rtn(float4);
+char4 __ovld __cnfn convert_char4(float4);
+char4 __ovld __cnfn convert_char4_sat(float4);
+uchar4 __ovld __cnfn convert_uchar4_rte(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4);
+uchar4 __ovld __cnfn convert_uchar4(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat(char4);
+uchar4 __ovld __cnfn convert_uchar4_rte(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4);
+uchar4 __ovld __cnfn convert_uchar4(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rte(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4);
+uchar4 __ovld __cnfn convert_uchar4(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat(short4);
+uchar4 __ovld __cnfn convert_uchar4_rte(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4);
+uchar4 __ovld __cnfn convert_uchar4(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rte(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4);
+uchar4 __ovld __cnfn convert_uchar4(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat(int4);
+uchar4 __ovld __cnfn convert_uchar4_rte(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4);
+uchar4 __ovld __cnfn convert_uchar4(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rte(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(long4);
+uchar4 __ovld __cnfn convert_uchar4(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat(long4);
+uchar4 __ovld __cnfn convert_uchar4_rte(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4);
+uchar4 __ovld __cnfn convert_uchar4(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rte(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4);
+uchar4 __ovld __cnfn convert_uchar4(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat(float4);
+short4 __ovld __cnfn convert_short4_rte(char4);
+short4 __ovld __cnfn convert_short4_sat_rte(char4);
+short4 __ovld __cnfn convert_short4_rtz(char4);
+short4 __ovld __cnfn convert_short4_sat_rtz(char4);
+short4 __ovld __cnfn convert_short4_rtp(char4);
+short4 __ovld __cnfn convert_short4_sat_rtp(char4);
+short4 __ovld __cnfn convert_short4_rtn(char4);
+short4 __ovld __cnfn convert_short4_sat_rtn(char4);
+short4 __ovld __cnfn convert_short4(char4);
+short4 __ovld __cnfn convert_short4_sat(char4);
+short4 __ovld __cnfn convert_short4_rte(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rte(uchar4);
+short4 __ovld __cnfn convert_short4_rtz(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtz(uchar4);
+short4 __ovld __cnfn convert_short4_rtp(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtp(uchar4);
+short4 __ovld __cnfn convert_short4_rtn(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtn(uchar4);
+short4 __ovld __cnfn convert_short4(uchar4);
+short4 __ovld __cnfn convert_short4_sat(uchar4);
+short4 __ovld __cnfn convert_short4_rte(short4);
+short4 __ovld __cnfn convert_short4_sat_rte(short4);
+short4 __ovld __cnfn convert_short4_rtz(short4);
+short4 __ovld __cnfn convert_short4_sat_rtz(short4);
+short4 __ovld __cnfn convert_short4_rtp(short4);
+short4 __ovld __cnfn convert_short4_sat_rtp(short4);
+short4 __ovld __cnfn convert_short4_rtn(short4);
+short4 __ovld __cnfn convert_short4_sat_rtn(short4);
+short4 __ovld __cnfn convert_short4(short4);
+short4 __ovld __cnfn convert_short4_sat(short4);
+short4 __ovld __cnfn convert_short4_rte(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rte(ushort4);
+short4 __ovld __cnfn convert_short4_rtz(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtz(ushort4);
+short4 __ovld __cnfn convert_short4_rtp(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtp(ushort4);
+short4 __ovld __cnfn convert_short4_rtn(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtn(ushort4);
+short4 __ovld __cnfn convert_short4(ushort4);
+short4 __ovld __cnfn convert_short4_sat(ushort4);
+short4 __ovld __cnfn convert_short4_rte(int4);
+short4 __ovld __cnfn convert_short4_sat_rte(int4);
+short4 __ovld __cnfn convert_short4_rtz(int4);
+short4 __ovld __cnfn convert_short4_sat_rtz(int4);
+short4 __ovld __cnfn convert_short4_rtp(int4);
+short4 __ovld __cnfn convert_short4_sat_rtp(int4);
+short4 __ovld __cnfn convert_short4_rtn(int4);
+short4 __ovld __cnfn convert_short4_sat_rtn(int4);
+short4 __ovld __cnfn convert_short4(int4);
+short4 __ovld __cnfn convert_short4_sat(int4);
+short4 __ovld __cnfn convert_short4_rte(uint4);
+short4 __ovld __cnfn convert_short4_sat_rte(uint4);
+short4 __ovld __cnfn convert_short4_rtz(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtz(uint4);
+short4 __ovld __cnfn convert_short4_rtp(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtp(uint4);
+short4 __ovld __cnfn convert_short4_rtn(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtn(uint4);
+short4 __ovld __cnfn convert_short4(uint4);
+short4 __ovld __cnfn convert_short4_sat(uint4);
+short4 __ovld __cnfn convert_short4_rte(long4);
+short4 __ovld __cnfn convert_short4_sat_rte(long4);
+short4 __ovld __cnfn convert_short4_rtz(long4);
+short4 __ovld __cnfn convert_short4_sat_rtz(long4);
+short4 __ovld __cnfn convert_short4_rtp(long4);
+short4 __ovld __cnfn convert_short4_sat_rtp(long4);
+short4 __ovld __cnfn convert_short4_rtn(long4);
+short4 __ovld __cnfn convert_short4_sat_rtn(long4);
+short4 __ovld __cnfn convert_short4(long4);
+short4 __ovld __cnfn convert_short4_sat(long4);
+short4 __ovld __cnfn convert_short4_rte(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rte(ulong4);
+short4 __ovld __cnfn convert_short4_rtz(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtz(ulong4);
+short4 __ovld __cnfn convert_short4_rtp(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtp(ulong4);
+short4 __ovld __cnfn convert_short4_rtn(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtn(ulong4);
+short4 __ovld __cnfn convert_short4(ulong4);
+short4 __ovld __cnfn convert_short4_sat(ulong4);
+short4 __ovld __cnfn convert_short4_rte(float4);
+short4 __ovld __cnfn convert_short4_sat_rte(float4);
+short4 __ovld __cnfn convert_short4_rtz(float4);
+short4 __ovld __cnfn convert_short4_sat_rtz(float4);
+short4 __ovld __cnfn convert_short4_rtp(float4);
+short4 __ovld __cnfn convert_short4_sat_rtp(float4);
+short4 __ovld __cnfn convert_short4_rtn(float4);
+short4 __ovld __cnfn convert_short4_sat_rtn(float4);
+short4 __ovld __cnfn convert_short4(float4);
+short4 __ovld __cnfn convert_short4_sat(float4);
+ushort4 __ovld __cnfn convert_ushort4_rte(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4);
+ushort4 __ovld __cnfn convert_ushort4(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat(char4);
+ushort4 __ovld __cnfn convert_ushort4_rte(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4);
+ushort4 __ovld __cnfn convert_ushort4(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rte(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4);
+ushort4 __ovld __cnfn convert_ushort4(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat(short4);
+ushort4 __ovld __cnfn convert_ushort4_rte(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4);
+ushort4 __ovld __cnfn convert_ushort4(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rte(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4);
+ushort4 __ovld __cnfn convert_ushort4(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat(int4);
+ushort4 __ovld __cnfn convert_ushort4_rte(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4);
+ushort4 __ovld __cnfn convert_ushort4(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rte(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4);
+ushort4 __ovld __cnfn convert_ushort4(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat(long4);
+ushort4 __ovld __cnfn convert_ushort4_rte(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4);
+ushort4 __ovld __cnfn convert_ushort4(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rte(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4);
+ushort4 __ovld __cnfn convert_ushort4(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat(float4);
+int4 __ovld __cnfn convert_int4_rte(char4);
+int4 __ovld __cnfn convert_int4_sat_rte(char4);
+int4 __ovld __cnfn convert_int4_rtz(char4);
+int4 __ovld __cnfn convert_int4_sat_rtz(char4);
+int4 __ovld __cnfn convert_int4_rtp(char4);
+int4 __ovld __cnfn convert_int4_sat_rtp(char4);
+int4 __ovld __cnfn convert_int4_rtn(char4);
+int4 __ovld __cnfn convert_int4_sat_rtn(char4);
+int4 __ovld __cnfn convert_int4(char4);
+int4 __ovld __cnfn convert_int4_sat(char4);
+int4 __ovld __cnfn convert_int4_rte(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rte(uchar4);
+int4 __ovld __cnfn convert_int4_rtz(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtz(uchar4);
+int4 __ovld __cnfn convert_int4_rtp(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtp(uchar4);
+int4 __ovld __cnfn convert_int4_rtn(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtn(uchar4);
+int4 __ovld __cnfn convert_int4(uchar4);
+int4 __ovld __cnfn convert_int4_sat(uchar4);
+int4 __ovld __cnfn convert_int4_rte(short4);
+int4 __ovld __cnfn convert_int4_sat_rte(short4);
+int4 __ovld __cnfn convert_int4_rtz(short4);
+int4 __ovld __cnfn convert_int4_sat_rtz(short4);
+int4 __ovld __cnfn convert_int4_rtp(short4);
+int4 __ovld __cnfn convert_int4_sat_rtp(short4);
+int4 __ovld __cnfn convert_int4_rtn(short4);
+int4 __ovld __cnfn convert_int4_sat_rtn(short4);
+int4 __ovld __cnfn convert_int4(short4);
+int4 __ovld __cnfn convert_int4_sat(short4);
+int4 __ovld __cnfn convert_int4_rte(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rte(ushort4);
+int4 __ovld __cnfn convert_int4_rtz(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtz(ushort4);
+int4 __ovld __cnfn convert_int4_rtp(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtp(ushort4);
+int4 __ovld __cnfn convert_int4_rtn(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtn(ushort4);
+int4 __ovld __cnfn convert_int4(ushort4);
+int4 __ovld __cnfn convert_int4_sat(ushort4);
+int4 __ovld __cnfn convert_int4_rte(int4);
+int4 __ovld __cnfn convert_int4_sat_rte(int4);
+int4 __ovld __cnfn convert_int4_rtz(int4);
+int4 __ovld __cnfn convert_int4_sat_rtz(int4);
+int4 __ovld __cnfn convert_int4_rtp(int4);
+int4 __ovld __cnfn convert_int4_sat_rtp(int4);
+int4 __ovld __cnfn convert_int4_rtn(int4);
+int4 __ovld __cnfn convert_int4_sat_rtn(int4);
+int4 __ovld __cnfn convert_int4(int4);
+int4 __ovld __cnfn convert_int4_sat(int4);
+int4 __ovld __cnfn convert_int4_rte(uint4);
+int4 __ovld __cnfn convert_int4_sat_rte(uint4);
+int4 __ovld __cnfn convert_int4_rtz(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtz(uint4);
+int4 __ovld __cnfn convert_int4_rtp(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtp(uint4);
+int4 __ovld __cnfn convert_int4_rtn(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtn(uint4);
+int4 __ovld __cnfn convert_int4(uint4);
+int4 __ovld __cnfn convert_int4_sat(uint4);
+int4 __ovld __cnfn convert_int4_rte(long4);
+int4 __ovld __cnfn convert_int4_sat_rte(long4);
+int4 __ovld __cnfn convert_int4_rtz(long4);
+int4 __ovld __cnfn convert_int4_sat_rtz(long4);
+int4 __ovld __cnfn convert_int4_rtp(long4);
+int4 __ovld __cnfn convert_int4_sat_rtp(long4);
+int4 __ovld __cnfn convert_int4_rtn(long4);
+int4 __ovld __cnfn convert_int4_sat_rtn(long4);
+int4 __ovld __cnfn convert_int4(long4);
+int4 __ovld __cnfn convert_int4_sat(long4);
+int4 __ovld __cnfn convert_int4_rte(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rte(ulong4);
+int4 __ovld __cnfn convert_int4_rtz(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtz(ulong4);
+int4 __ovld __cnfn convert_int4_rtp(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtp(ulong4);
+int4 __ovld __cnfn convert_int4_rtn(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtn(ulong4);
+int4 __ovld __cnfn convert_int4(ulong4);
+int4 __ovld __cnfn convert_int4_sat(ulong4);
+int4 __ovld __cnfn convert_int4_rte(float4);
+int4 __ovld __cnfn convert_int4_sat_rte(float4);
+int4 __ovld __cnfn convert_int4_rtz(float4);
+int4 __ovld __cnfn convert_int4_sat_rtz(float4);
+int4 __ovld __cnfn convert_int4_rtp(float4);
+int4 __ovld __cnfn convert_int4_sat_rtp(float4);
+int4 __ovld __cnfn convert_int4_rtn(float4);
+int4 __ovld __cnfn convert_int4_sat_rtn(float4);
+int4 __ovld __cnfn convert_int4(float4);
+int4 __ovld __cnfn convert_int4_sat(float4);
+uint4 __ovld __cnfn convert_uint4_rte(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(char4);
+uint4 __ovld __cnfn convert_uint4_rtz(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(char4);
+uint4 __ovld __cnfn convert_uint4_rtp(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(char4);
+uint4 __ovld __cnfn convert_uint4_rtn(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(char4);
+uint4 __ovld __cnfn convert_uint4(char4);
+uint4 __ovld __cnfn convert_uint4_sat(char4);
+uint4 __ovld __cnfn convert_uint4_rte(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtz(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtp(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtn(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4);
+uint4 __ovld __cnfn convert_uint4(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat(uchar4);
+uint4 __ovld __cnfn convert_uint4_rte(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(short4);
+uint4 __ovld __cnfn convert_uint4_rtz(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(short4);
+uint4 __ovld __cnfn convert_uint4_rtp(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(short4);
+uint4 __ovld __cnfn convert_uint4_rtn(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(short4);
+uint4 __ovld __cnfn convert_uint4(short4);
+uint4 __ovld __cnfn convert_uint4_sat(short4);
+uint4 __ovld __cnfn convert_uint4_rte(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtz(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtp(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtn(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4);
+uint4 __ovld __cnfn convert_uint4(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat(ushort4);
+uint4 __ovld __cnfn convert_uint4_rte(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(int4);
+uint4 __ovld __cnfn convert_uint4_rtz(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(int4);
+uint4 __ovld __cnfn convert_uint4_rtp(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(int4);
+uint4 __ovld __cnfn convert_uint4_rtn(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(int4);
+uint4 __ovld __cnfn convert_uint4(int4);
+uint4 __ovld __cnfn convert_uint4_sat(int4);
+uint4 __ovld __cnfn convert_uint4_rte(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(uint4);
+uint4 __ovld __cnfn convert_uint4_rtz(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4);
+uint4 __ovld __cnfn convert_uint4_rtp(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4);
+uint4 __ovld __cnfn convert_uint4_rtn(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4);
+uint4 __ovld __cnfn convert_uint4(uint4);
+uint4 __ovld __cnfn convert_uint4_sat(uint4);
+uint4 __ovld __cnfn convert_uint4_rte(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(long4);
+uint4 __ovld __cnfn convert_uint4_rtz(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(long4);
+uint4 __ovld __cnfn convert_uint4_rtp(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(long4);
+uint4 __ovld __cnfn convert_uint4_rtn(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(long4);
+uint4 __ovld __cnfn convert_uint4(long4);
+uint4 __ovld __cnfn convert_uint4_sat(long4);
+uint4 __ovld __cnfn convert_uint4_rte(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtz(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtp(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtn(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4);
+uint4 __ovld __cnfn convert_uint4(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat(ulong4);
+uint4 __ovld __cnfn convert_uint4_rte(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(float4);
+uint4 __ovld __cnfn convert_uint4_rtz(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(float4);
+uint4 __ovld __cnfn convert_uint4_rtp(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(float4);
+uint4 __ovld __cnfn convert_uint4_rtn(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(float4);
+uint4 __ovld __cnfn convert_uint4(float4);
+uint4 __ovld __cnfn convert_uint4_sat(float4);
+long4 __ovld __cnfn convert_long4_rte(char4);
+long4 __ovld __cnfn convert_long4_sat_rte(char4);
+long4 __ovld __cnfn convert_long4_rtz(char4);
+long4 __ovld __cnfn convert_long4_sat_rtz(char4);
+long4 __ovld __cnfn convert_long4_rtp(char4);
+long4 __ovld __cnfn convert_long4_sat_rtp(char4);
+long4 __ovld __cnfn convert_long4_rtn(char4);
+long4 __ovld __cnfn convert_long4_sat_rtn(char4);
+long4 __ovld __cnfn convert_long4(char4);
+long4 __ovld __cnfn convert_long4_sat(char4);
+long4 __ovld __cnfn convert_long4_rte(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rte(uchar4);
+long4 __ovld __cnfn convert_long4_rtz(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtz(uchar4);
+long4 __ovld __cnfn convert_long4_rtp(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtp(uchar4);
+long4 __ovld __cnfn convert_long4_rtn(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtn(uchar4);
+long4 __ovld __cnfn convert_long4(uchar4);
+long4 __ovld __cnfn convert_long4_sat(uchar4);
+long4 __ovld __cnfn convert_long4_rte(short4);
+long4 __ovld __cnfn convert_long4_sat_rte(short4);
+long4 __ovld __cnfn convert_long4_rtz(short4);
+long4 __ovld __cnfn convert_long4_sat_rtz(short4);
+long4 __ovld __cnfn convert_long4_rtp(short4);
+long4 __ovld __cnfn convert_long4_sat_rtp(short4);
+long4 __ovld __cnfn convert_long4_rtn(short4);
+long4 __ovld __cnfn convert_long4_sat_rtn(short4);
+long4 __ovld __cnfn convert_long4(short4);
+long4 __ovld __cnfn convert_long4_sat(short4);
+long4 __ovld __cnfn convert_long4_rte(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rte(ushort4);
+long4 __ovld __cnfn convert_long4_rtz(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtz(ushort4);
+long4 __ovld __cnfn convert_long4_rtp(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtp(ushort4);
+long4 __ovld __cnfn convert_long4_rtn(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtn(ushort4);
+long4 __ovld __cnfn convert_long4(ushort4);
+long4 __ovld __cnfn convert_long4_sat(ushort4);
+long4 __ovld __cnfn convert_long4_rte(int4);
+long4 __ovld __cnfn convert_long4_sat_rte(int4);
+long4 __ovld __cnfn convert_long4_rtz(int4);
+long4 __ovld __cnfn convert_long4_sat_rtz(int4);
+long4 __ovld __cnfn convert_long4_rtp(int4);
+long4 __ovld __cnfn convert_long4_sat_rtp(int4);
+long4 __ovld __cnfn convert_long4_rtn(int4);
+long4 __ovld __cnfn convert_long4_sat_rtn(int4);
+long4 __ovld __cnfn convert_long4(int4);
+long4 __ovld __cnfn convert_long4_sat(int4);
+long4 __ovld __cnfn convert_long4_rte(uint4);
+long4 __ovld __cnfn convert_long4_sat_rte(uint4);
+long4 __ovld __cnfn convert_long4_rtz(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtz(uint4);
+long4 __ovld __cnfn convert_long4_rtp(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtp(uint4);
+long4 __ovld __cnfn convert_long4_rtn(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtn(uint4);
+long4 __ovld __cnfn convert_long4(uint4);
+long4 __ovld __cnfn convert_long4_sat(uint4);
+long4 __ovld __cnfn convert_long4_rte(long4);
+long4 __ovld __cnfn convert_long4_sat_rte(long4);
+long4 __ovld __cnfn convert_long4_rtz(long4);
+long4 __ovld __cnfn convert_long4_sat_rtz(long4);
+long4 __ovld __cnfn convert_long4_rtp(long4);
+long4 __ovld __cnfn convert_long4_sat_rtp(long4);
+long4 __ovld __cnfn convert_long4_rtn(long4);
+long4 __ovld __cnfn convert_long4_sat_rtn(long4);
+long4 __ovld __cnfn convert_long4(long4);
+long4 __ovld __cnfn convert_long4_sat(long4);
+long4 __ovld __cnfn convert_long4_rte(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rte(ulong4);
+long4 __ovld __cnfn convert_long4_rtz(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtz(ulong4);
+long4 __ovld __cnfn convert_long4_rtp(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtp(ulong4);
+long4 __ovld __cnfn convert_long4_rtn(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtn(ulong4);
+long4 __ovld __cnfn convert_long4(ulong4);
+long4 __ovld __cnfn convert_long4_sat(ulong4);
+long4 __ovld __cnfn convert_long4_rte(float4);
+long4 __ovld __cnfn convert_long4_sat_rte(float4);
+long4 __ovld __cnfn convert_long4_rtz(float4);
+long4 __ovld __cnfn convert_long4_sat_rtz(float4);
+long4 __ovld __cnfn convert_long4_rtp(float4);
+long4 __ovld __cnfn convert_long4_sat_rtp(float4);
+long4 __ovld __cnfn convert_long4_rtn(float4);
+long4 __ovld __cnfn convert_long4_sat_rtn(float4);
+long4 __ovld __cnfn convert_long4(float4);
+long4 __ovld __cnfn convert_long4_sat(float4);
+ulong4 __ovld __cnfn convert_ulong4_rte(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4);
+ulong4 __ovld __cnfn convert_ulong4(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat(char4);
+ulong4 __ovld __cnfn convert_ulong4_rte(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4);
+ulong4 __ovld __cnfn convert_ulong4(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rte(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4);
+ulong4 __ovld __cnfn convert_ulong4(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat(short4);
+ulong4 __ovld __cnfn convert_ulong4_rte(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4);
+ulong4 __ovld __cnfn convert_ulong4(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rte(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4);
+ulong4 __ovld __cnfn convert_ulong4(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat(int4);
+ulong4 __ovld __cnfn convert_ulong4_rte(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4);
+ulong4 __ovld __cnfn convert_ulong4(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rte(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4);
+ulong4 __ovld __cnfn convert_ulong4(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat(long4);
+ulong4 __ovld __cnfn convert_ulong4_rte(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4);
+ulong4 __ovld __cnfn convert_ulong4(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rte(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4);
+ulong4 __ovld __cnfn convert_ulong4(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat(float4);
+float4 __ovld __cnfn convert_float4_rte(char4);
+float4 __ovld __cnfn convert_float4_rtz(char4);
+float4 __ovld __cnfn convert_float4_rtp(char4);
+float4 __ovld __cnfn convert_float4_rtn(char4);
+float4 __ovld __cnfn convert_float4(char4);
+float4 __ovld __cnfn convert_float4_rte(uchar4);
+float4 __ovld __cnfn convert_float4_rtz(uchar4);
+float4 __ovld __cnfn convert_float4_rtp(uchar4);
+float4 __ovld __cnfn convert_float4_rtn(uchar4);
+float4 __ovld __cnfn convert_float4(uchar4);
+float4 __ovld __cnfn convert_float4_rte(short4);
+float4 __ovld __cnfn convert_float4_rtz(short4);
+float4 __ovld __cnfn convert_float4_rtp(short4);
+float4 __ovld __cnfn convert_float4_rtn(short4);
+float4 __ovld __cnfn convert_float4(short4);
+float4 __ovld __cnfn convert_float4_rte(ushort4);
+float4 __ovld __cnfn convert_float4_rtz(ushort4);
+float4 __ovld __cnfn convert_float4_rtp(ushort4);
+float4 __ovld __cnfn convert_float4_rtn(ushort4);
+float4 __ovld __cnfn convert_float4(ushort4);
+float4 __ovld __cnfn convert_float4_rte(int4);
+float4 __ovld __cnfn convert_float4_rtz(int4);
+float4 __ovld __cnfn convert_float4_rtp(int4);
+float4 __ovld __cnfn convert_float4_rtn(int4);
+float4 __ovld __cnfn convert_float4(int4);
+float4 __ovld __cnfn convert_float4_rte(uint4);
+float4 __ovld __cnfn convert_float4_rtz(uint4);
+float4 __ovld __cnfn convert_float4_rtp(uint4);
+float4 __ovld __cnfn convert_float4_rtn(uint4);
+float4 __ovld __cnfn convert_float4(uint4);
+float4 __ovld __cnfn convert_float4_rte(long4);
+float4 __ovld __cnfn convert_float4_rtz(long4);
+float4 __ovld __cnfn convert_float4_rtp(long4);
+float4 __ovld __cnfn convert_float4_rtn(long4);
+float4 __ovld __cnfn convert_float4(long4);
+float4 __ovld __cnfn convert_float4_rte(ulong4);
+float4 __ovld __cnfn convert_float4_rtz(ulong4);
+float4 __ovld __cnfn convert_float4_rtp(ulong4);
+float4 __ovld __cnfn convert_float4_rtn(ulong4);
+float4 __ovld __cnfn convert_float4(ulong4);
+float4 __ovld __cnfn convert_float4_rte(float4);
+float4 __ovld __cnfn convert_float4_rtz(float4);
+float4 __ovld __cnfn convert_float4_rtp(float4);
+float4 __ovld __cnfn convert_float4_rtn(float4);
+float4 __ovld __cnfn convert_float4(float4);
+char8 __ovld __cnfn convert_char8_rte(char8);
+char8 __ovld __cnfn convert_char8_sat_rte(char8);
+char8 __ovld __cnfn convert_char8_rtz(char8);
+char8 __ovld __cnfn convert_char8_sat_rtz(char8);
+char8 __ovld __cnfn convert_char8_rtp(char8);
+char8 __ovld __cnfn convert_char8_sat_rtp(char8);
+char8 __ovld __cnfn convert_char8_rtn(char8);
+char8 __ovld __cnfn convert_char8_sat_rtn(char8);
+char8 __ovld __cnfn convert_char8(char8);
+char8 __ovld __cnfn convert_char8_sat(char8);
+char8 __ovld __cnfn convert_char8_rte(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rte(uchar8);
+char8 __ovld __cnfn convert_char8_rtz(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtz(uchar8);
+char8 __ovld __cnfn convert_char8_rtp(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtp(uchar8);
+char8 __ovld __cnfn convert_char8_rtn(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtn(uchar8);
+char8 __ovld __cnfn convert_char8(uchar8);
+char8 __ovld __cnfn convert_char8_sat(uchar8);
+char8 __ovld __cnfn convert_char8_rte(short8);
+char8 __ovld __cnfn convert_char8_sat_rte(short8);
+char8 __ovld __cnfn convert_char8_rtz(short8);
+char8 __ovld __cnfn convert_char8_sat_rtz(short8);
+char8 __ovld __cnfn convert_char8_rtp(short8);
+char8 __ovld __cnfn convert_char8_sat_rtp(short8);
+char8 __ovld __cnfn convert_char8_rtn(short8);
+char8 __ovld __cnfn convert_char8_sat_rtn(short8);
+char8 __ovld __cnfn convert_char8(short8);
+char8 __ovld __cnfn convert_char8_sat(short8);
+char8 __ovld __cnfn convert_char8_rte(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rte(ushort8);
+char8 __ovld __cnfn convert_char8_rtz(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtz(ushort8);
+char8 __ovld __cnfn convert_char8_rtp(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtp(ushort8);
+char8 __ovld __cnfn convert_char8_rtn(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtn(ushort8);
+char8 __ovld __cnfn convert_char8(ushort8);
+char8 __ovld __cnfn convert_char8_sat(ushort8);
+char8 __ovld __cnfn convert_char8_rte(int8);
+char8 __ovld __cnfn convert_char8_sat_rte(int8);
+char8 __ovld __cnfn convert_char8_rtz(int8);
+char8 __ovld __cnfn convert_char8_sat_rtz(int8);
+char8 __ovld __cnfn convert_char8_rtp(int8);
+char8 __ovld __cnfn convert_char8_sat_rtp(int8);
+char8 __ovld __cnfn convert_char8_rtn(int8);
+char8 __ovld __cnfn convert_char8_sat_rtn(int8);
+char8 __ovld __cnfn convert_char8(int8);
+char8 __ovld __cnfn convert_char8_sat(int8);
+char8 __ovld __cnfn convert_char8_rte(uint8);
+char8 __ovld __cnfn convert_char8_sat_rte(uint8);
+char8 __ovld __cnfn convert_char8_rtz(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtz(uint8);
+char8 __ovld __cnfn convert_char8_rtp(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtp(uint8);
+char8 __ovld __cnfn convert_char8_rtn(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtn(uint8);
+char8 __ovld __cnfn convert_char8(uint8);
+char8 __ovld __cnfn convert_char8_sat(uint8);
+char8 __ovld __cnfn convert_char8_rte(long8);
+char8 __ovld __cnfn convert_char8_sat_rte(long8);
+char8 __ovld __cnfn convert_char8_rtz(long8);
+char8 __ovld __cnfn convert_char8_sat_rtz(long8);
+char8 __ovld __cnfn convert_char8_rtp(long8);
+char8 __ovld __cnfn convert_char8_sat_rtp(long8);
+char8 __ovld __cnfn convert_char8_rtn(long8);
+char8 __ovld __cnfn convert_char8_sat_rtn(long8);
+char8 __ovld __cnfn convert_char8(long8);
+char8 __ovld __cnfn convert_char8_sat(long8);
+char8 __ovld __cnfn convert_char8_rte(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rte(ulong8);
+char8 __ovld __cnfn convert_char8_rtz(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtz(ulong8);
+char8 __ovld __cnfn convert_char8_rtp(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtp(ulong8);
+char8 __ovld __cnfn convert_char8_rtn(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtn(ulong8);
+char8 __ovld __cnfn convert_char8(ulong8);
+char8 __ovld __cnfn convert_char8_sat(ulong8);
+char8 __ovld __cnfn convert_char8_rte(float8);
+char8 __ovld __cnfn convert_char8_sat_rte(float8);
+char8 __ovld __cnfn convert_char8_rtz(float8);
+char8 __ovld __cnfn convert_char8_sat_rtz(float8);
+char8 __ovld __cnfn convert_char8_rtp(float8);
+char8 __ovld __cnfn convert_char8_sat_rtp(float8);
+char8 __ovld __cnfn convert_char8_rtn(float8);
+char8 __ovld __cnfn convert_char8_sat_rtn(float8);
+char8 __ovld __cnfn convert_char8(float8);
+char8 __ovld __cnfn convert_char8_sat(float8);
+uchar8 __ovld __cnfn convert_uchar8_rte(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8);
+uchar8 __ovld __cnfn convert_uchar8(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat(char8);
+uchar8 __ovld __cnfn convert_uchar8_rte(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8);
+uchar8 __ovld __cnfn convert_uchar8(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rte(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8);
+uchar8 __ovld __cnfn convert_uchar8(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat(short8);
+uchar8 __ovld __cnfn convert_uchar8_rte(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8);
+uchar8 __ovld __cnfn convert_uchar8(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rte(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8);
+uchar8 __ovld __cnfn convert_uchar8(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat(int8);
+uchar8 __ovld __cnfn convert_uchar8_rte(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8);
+uchar8 __ovld __cnfn convert_uchar8(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rte(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8);
+uchar8 __ovld __cnfn convert_uchar8(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat(long8);
+uchar8 __ovld __cnfn convert_uchar8_rte(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8);
+uchar8 __ovld __cnfn convert_uchar8(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rte(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8);
+uchar8 __ovld __cnfn convert_uchar8(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat(float8);
+short8 __ovld __cnfn convert_short8_rte(char8);
+short8 __ovld __cnfn convert_short8_sat_rte(char8);
+short8 __ovld __cnfn convert_short8_rtz(char8);
+short8 __ovld __cnfn convert_short8_sat_rtz(char8);
+short8 __ovld __cnfn convert_short8_rtp(char8);
+short8 __ovld __cnfn convert_short8_sat_rtp(char8);
+short8 __ovld __cnfn convert_short8_rtn(char8);
+short8 __ovld __cnfn convert_short8_sat_rtn(char8);
+short8 __ovld __cnfn convert_short8(char8);
+short8 __ovld __cnfn convert_short8_sat(char8);
+short8 __ovld __cnfn convert_short8_rte(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rte(uchar8);
+short8 __ovld __cnfn convert_short8_rtz(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtz(uchar8);
+short8 __ovld __cnfn convert_short8_rtp(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtp(uchar8);
+short8 __ovld __cnfn convert_short8_rtn(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtn(uchar8);
+short8 __ovld __cnfn convert_short8(uchar8);
+short8 __ovld __cnfn convert_short8_sat(uchar8);
+short8 __ovld __cnfn convert_short8_rte(short8);
+short8 __ovld __cnfn convert_short8_sat_rte(short8);
+short8 __ovld __cnfn convert_short8_rtz(short8);
+short8 __ovld __cnfn convert_short8_sat_rtz(short8);
+short8 __ovld __cnfn convert_short8_rtp(short8);
+short8 __ovld __cnfn convert_short8_sat_rtp(short8);
+short8 __ovld __cnfn convert_short8_rtn(short8);
+short8 __ovld __cnfn convert_short8_sat_rtn(short8);
+short8 __ovld __cnfn convert_short8(short8);
+short8 __ovld __cnfn convert_short8_sat(short8);
+short8 __ovld __cnfn convert_short8_rte(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rte(ushort8);
+short8 __ovld __cnfn convert_short8_rtz(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtz(ushort8);
+short8 __ovld __cnfn convert_short8_rtp(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtp(ushort8);
+short8 __ovld __cnfn convert_short8_rtn(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtn(ushort8);
+short8 __ovld __cnfn convert_short8(ushort8);
+short8 __ovld __cnfn convert_short8_sat(ushort8);
+short8 __ovld __cnfn convert_short8_rte(int8);
+short8 __ovld __cnfn convert_short8_sat_rte(int8);
+short8 __ovld __cnfn convert_short8_rtz(int8);
+short8 __ovld __cnfn convert_short8_sat_rtz(int8);
+short8 __ovld __cnfn convert_short8_rtp(int8);
+short8 __ovld __cnfn convert_short8_sat_rtp(int8);
+short8 __ovld __cnfn convert_short8_rtn(int8);
+short8 __ovld __cnfn convert_short8_sat_rtn(int8);
+short8 __ovld __cnfn convert_short8(int8);
+short8 __ovld __cnfn convert_short8_sat(int8);
+short8 __ovld __cnfn convert_short8_rte(uint8);
+short8 __ovld __cnfn convert_short8_sat_rte(uint8);
+short8 __ovld __cnfn convert_short8_rtz(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtz(uint8);
+short8 __ovld __cnfn convert_short8_rtp(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtp(uint8);
+short8 __ovld __cnfn convert_short8_rtn(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtn(uint8);
+short8 __ovld __cnfn convert_short8(uint8);
+short8 __ovld __cnfn convert_short8_sat(uint8);
+short8 __ovld __cnfn convert_short8_rte(long8);
+short8 __ovld __cnfn convert_short8_sat_rte(long8);
+short8 __ovld __cnfn convert_short8_rtz(long8);
+short8 __ovld __cnfn convert_short8_sat_rtz(long8);
+short8 __ovld __cnfn convert_short8_rtp(long8);
+short8 __ovld __cnfn convert_short8_sat_rtp(long8);
+short8 __ovld __cnfn convert_short8_rtn(long8);
+short8 __ovld __cnfn convert_short8_sat_rtn(long8);
+short8 __ovld __cnfn convert_short8(long8);
+short8 __ovld __cnfn convert_short8_sat(long8);
+short8 __ovld __cnfn convert_short8_rte(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rte(ulong8);
+short8 __ovld __cnfn convert_short8_rtz(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtz(ulong8);
+short8 __ovld __cnfn convert_short8_rtp(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtp(ulong8);
+short8 __ovld __cnfn convert_short8_rtn(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtn(ulong8);
+short8 __ovld __cnfn convert_short8(ulong8);
+short8 __ovld __cnfn convert_short8_sat(ulong8);
+short8 __ovld __cnfn convert_short8_rte(float8);
+short8 __ovld __cnfn convert_short8_sat_rte(float8);
+short8 __ovld __cnfn convert_short8_rtz(float8);
+short8 __ovld __cnfn convert_short8_sat_rtz(float8);
+short8 __ovld __cnfn convert_short8_rtp(float8);
+short8 __ovld __cnfn convert_short8_sat_rtp(float8);
+short8 __ovld __cnfn convert_short8_rtn(float8);
+short8 __ovld __cnfn convert_short8_sat_rtn(float8);
+short8 __ovld __cnfn convert_short8(float8);
+short8 __ovld __cnfn convert_short8_sat(float8);
+ushort8 __ovld __cnfn convert_ushort8_rte(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8);
+ushort8 __ovld __cnfn convert_ushort8(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat(char8);
+ushort8 __ovld __cnfn convert_ushort8_rte(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8);
+ushort8 __ovld __cnfn convert_ushort8(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rte(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8);
+ushort8 __ovld __cnfn convert_ushort8(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat(short8);
+ushort8 __ovld __cnfn convert_ushort8_rte(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8);
+ushort8 __ovld __cnfn convert_ushort8(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rte(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8);
+ushort8 __ovld __cnfn convert_ushort8(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat(int8);
+ushort8 __ovld __cnfn convert_ushort8_rte(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8);
+ushort8 __ovld __cnfn convert_ushort8(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rte(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8);
+ushort8 __ovld __cnfn convert_ushort8(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat(long8);
+ushort8 __ovld __cnfn convert_ushort8_rte(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8);
+ushort8 __ovld __cnfn convert_ushort8(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rte(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8);
+ushort8 __ovld __cnfn convert_ushort8(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat(float8);
+int8 __ovld __cnfn convert_int8_rte(char8);
+int8 __ovld __cnfn convert_int8_sat_rte(char8);
+int8 __ovld __cnfn convert_int8_rtz(char8);
+int8 __ovld __cnfn convert_int8_sat_rtz(char8);
+int8 __ovld __cnfn convert_int8_rtp(char8);
+int8 __ovld __cnfn convert_int8_sat_rtp(char8);
+int8 __ovld __cnfn convert_int8_rtn(char8);
+int8 __ovld __cnfn convert_int8_sat_rtn(char8);
+int8 __ovld __cnfn convert_int8(char8);
+int8 __ovld __cnfn convert_int8_sat(char8);
+int8 __ovld __cnfn convert_int8_rte(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rte(uchar8);
+int8 __ovld __cnfn convert_int8_rtz(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtz(uchar8);
+int8 __ovld __cnfn convert_int8_rtp(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtp(uchar8);
+int8 __ovld __cnfn convert_int8_rtn(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtn(uchar8);
+int8 __ovld __cnfn convert_int8(uchar8);
+int8 __ovld __cnfn convert_int8_sat(uchar8);
+int8 __ovld __cnfn convert_int8_rte(short8);
+int8 __ovld __cnfn convert_int8_sat_rte(short8);
+int8 __ovld __cnfn convert_int8_rtz(short8);
+int8 __ovld __cnfn convert_int8_sat_rtz(short8);
+int8 __ovld __cnfn convert_int8_rtp(short8);
+int8 __ovld __cnfn convert_int8_sat_rtp(short8);
+int8 __ovld __cnfn convert_int8_rtn(short8);
+int8 __ovld __cnfn convert_int8_sat_rtn(short8);
+int8 __ovld __cnfn convert_int8(short8);
+int8 __ovld __cnfn convert_int8_sat(short8);
+int8 __ovld __cnfn convert_int8_rte(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rte(ushort8);
+int8 __ovld __cnfn convert_int8_rtz(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtz(ushort8);
+int8 __ovld __cnfn convert_int8_rtp(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtp(ushort8);
+int8 __ovld __cnfn convert_int8_rtn(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtn(ushort8);
+int8 __ovld __cnfn convert_int8(ushort8);
+int8 __ovld __cnfn convert_int8_sat(ushort8);
+int8 __ovld __cnfn convert_int8_rte(int8);
+int8 __ovld __cnfn convert_int8_sat_rte(int8);
+int8 __ovld __cnfn convert_int8_rtz(int8);
+int8 __ovld __cnfn convert_int8_sat_rtz(int8);
+int8 __ovld __cnfn convert_int8_rtp(int8);
+int8 __ovld __cnfn convert_int8_sat_rtp(int8);
+int8 __ovld __cnfn convert_int8_rtn(int8);
+int8 __ovld __cnfn convert_int8_sat_rtn(int8);
+int8 __ovld __cnfn convert_int8(int8);
+int8 __ovld __cnfn convert_int8_sat(int8);
+int8 __ovld __cnfn convert_int8_rte(uint8);
+int8 __ovld __cnfn convert_int8_sat_rte(uint8);
+int8 __ovld __cnfn convert_int8_rtz(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtz(uint8);
+int8 __ovld __cnfn convert_int8_rtp(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtp(uint8);
+int8 __ovld __cnfn convert_int8_rtn(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtn(uint8);
+int8 __ovld __cnfn convert_int8(uint8);
+int8 __ovld __cnfn convert_int8_sat(uint8);
+int8 __ovld __cnfn convert_int8_rte(long8);
+int8 __ovld __cnfn convert_int8_sat_rte(long8);
+int8 __ovld __cnfn convert_int8_rtz(long8);
+int8 __ovld __cnfn convert_int8_sat_rtz(long8);
+int8 __ovld __cnfn convert_int8_rtp(long8);
+int8 __ovld __cnfn convert_int8_sat_rtp(long8);
+int8 __ovld __cnfn convert_int8_rtn(long8);
+int8 __ovld __cnfn convert_int8_sat_rtn(long8);
+int8 __ovld __cnfn convert_int8(long8);
+int8 __ovld __cnfn convert_int8_sat(long8);
+int8 __ovld __cnfn convert_int8_rte(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rte(ulong8);
+int8 __ovld __cnfn convert_int8_rtz(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtz(ulong8);
+int8 __ovld __cnfn convert_int8_rtp(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtp(ulong8);
+int8 __ovld __cnfn convert_int8_rtn(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtn(ulong8);
+int8 __ovld __cnfn convert_int8(ulong8);
+int8 __ovld __cnfn convert_int8_sat(ulong8);
+int8 __ovld __cnfn convert_int8_rte(float8);
+int8 __ovld __cnfn convert_int8_sat_rte(float8);
+int8 __ovld __cnfn convert_int8_rtz(float8);
+int8 __ovld __cnfn convert_int8_sat_rtz(float8);
+int8 __ovld __cnfn convert_int8_rtp(float8);
+int8 __ovld __cnfn convert_int8_sat_rtp(float8);
+int8 __ovld __cnfn convert_int8_rtn(float8);
+int8 __ovld __cnfn convert_int8_sat_rtn(float8);
+int8 __ovld __cnfn convert_int8(float8);
+int8 __ovld __cnfn convert_int8_sat(float8);
+uint8 __ovld __cnfn convert_uint8_rte(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(char8);
+uint8 __ovld __cnfn convert_uint8_rtz(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(char8);
+uint8 __ovld __cnfn convert_uint8_rtp(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(char8);
+uint8 __ovld __cnfn convert_uint8_rtn(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(char8);
+uint8 __ovld __cnfn convert_uint8(char8);
+uint8 __ovld __cnfn convert_uint8_sat(char8);
+uint8 __ovld __cnfn convert_uint8_rte(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtz(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtp(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtn(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8);
+uint8 __ovld __cnfn convert_uint8(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat(uchar8);
+uint8 __ovld __cnfn convert_uint8_rte(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(short8);
+uint8 __ovld __cnfn convert_uint8_rtz(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(short8);
+uint8 __ovld __cnfn convert_uint8_rtp(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(short8);
+uint8 __ovld __cnfn convert_uint8_rtn(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(short8);
+uint8 __ovld __cnfn convert_uint8(short8);
+uint8 __ovld __cnfn convert_uint8_sat(short8);
+uint8 __ovld __cnfn convert_uint8_rte(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtz(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtp(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtn(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8);
+uint8 __ovld __cnfn convert_uint8(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat(ushort8);
+uint8 __ovld __cnfn convert_uint8_rte(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(int8);
+uint8 __ovld __cnfn convert_uint8_rtz(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(int8);
+uint8 __ovld __cnfn convert_uint8_rtp(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(int8);
+uint8 __ovld __cnfn convert_uint8_rtn(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(int8);
+uint8 __ovld __cnfn convert_uint8(int8);
+uint8 __ovld __cnfn convert_uint8_sat(int8);
+uint8 __ovld __cnfn convert_uint8_rte(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(uint8);
+uint8 __ovld __cnfn convert_uint8_rtz(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8);
+uint8 __ovld __cnfn convert_uint8_rtp(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8);
+uint8 __ovld __cnfn convert_uint8_rtn(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8);
+uint8 __ovld __cnfn convert_uint8(uint8);
+uint8 __ovld __cnfn convert_uint8_sat(uint8);
+uint8 __ovld __cnfn convert_uint8_rte(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(long8);
+uint8 __ovld __cnfn convert_uint8_rtz(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(long8);
+uint8 __ovld __cnfn convert_uint8_rtp(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(long8);
+uint8 __ovld __cnfn convert_uint8_rtn(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(long8);
+uint8 __ovld __cnfn convert_uint8(long8);
+uint8 __ovld __cnfn convert_uint8_sat(long8);
+uint8 __ovld __cnfn convert_uint8_rte(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtz(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtp(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtn(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8);
+uint8 __ovld __cnfn convert_uint8(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat(ulong8);
+uint8 __ovld __cnfn convert_uint8_rte(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(float8);
+uint8 __ovld __cnfn convert_uint8_rtz(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(float8);
+uint8 __ovld __cnfn convert_uint8_rtp(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(float8);
+uint8 __ovld __cnfn convert_uint8_rtn(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(float8);
+uint8 __ovld __cnfn convert_uint8(float8);
+uint8 __ovld __cnfn convert_uint8_sat(float8);
+long8 __ovld __cnfn convert_long8_rte(char8);
+long8 __ovld __cnfn convert_long8_sat_rte(char8);
+long8 __ovld __cnfn convert_long8_rtz(char8);
+long8 __ovld __cnfn convert_long8_sat_rtz(char8);
+long8 __ovld __cnfn convert_long8_rtp(char8);
+long8 __ovld __cnfn convert_long8_sat_rtp(char8);
+long8 __ovld __cnfn convert_long8_rtn(char8);
+long8 __ovld __cnfn convert_long8_sat_rtn(char8);
+long8 __ovld __cnfn convert_long8(char8);
+long8 __ovld __cnfn convert_long8_sat(char8);
+long8 __ovld __cnfn convert_long8_rte(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rte(uchar8);
+long8 __ovld __cnfn convert_long8_rtz(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtz(uchar8);
+long8 __ovld __cnfn convert_long8_rtp(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtp(uchar8);
+long8 __ovld __cnfn convert_long8_rtn(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtn(uchar8);
+long8 __ovld __cnfn convert_long8(uchar8);
+long8 __ovld __cnfn convert_long8_sat(uchar8);
+long8 __ovld __cnfn convert_long8_rte(short8);
+long8 __ovld __cnfn convert_long8_sat_rte(short8);
+long8 __ovld __cnfn convert_long8_rtz(short8);
+long8 __ovld __cnfn convert_long8_sat_rtz(short8);
+long8 __ovld __cnfn convert_long8_rtp(short8);
+long8 __ovld __cnfn convert_long8_sat_rtp(short8);
+long8 __ovld __cnfn convert_long8_rtn(short8);
+long8 __ovld __cnfn convert_long8_sat_rtn(short8);
+long8 __ovld __cnfn convert_long8(short8);
+long8 __ovld __cnfn convert_long8_sat(short8);
+long8 __ovld __cnfn convert_long8_rte(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rte(ushort8);
+long8 __ovld __cnfn convert_long8_rtz(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtz(ushort8);
+long8 __ovld __cnfn convert_long8_rtp(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtp(ushort8);
+long8 __ovld __cnfn convert_long8_rtn(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtn(ushort8);
+long8 __ovld __cnfn convert_long8(ushort8);
+long8 __ovld __cnfn convert_long8_sat(ushort8);
+long8 __ovld __cnfn convert_long8_rte(int8);
+long8 __ovld __cnfn convert_long8_sat_rte(int8);
+long8 __ovld __cnfn convert_long8_rtz(int8);
+long8 __ovld __cnfn convert_long8_sat_rtz(int8);
+long8 __ovld __cnfn convert_long8_rtp(int8);
+long8 __ovld __cnfn convert_long8_sat_rtp(int8);
+long8 __ovld __cnfn convert_long8_rtn(int8);
+long8 __ovld __cnfn convert_long8_sat_rtn(int8);
+long8 __ovld __cnfn convert_long8(int8);
+long8 __ovld __cnfn convert_long8_sat(int8);
+long8 __ovld __cnfn convert_long8_rte(uint8);
+long8 __ovld __cnfn convert_long8_sat_rte(uint8);
+long8 __ovld __cnfn convert_long8_rtz(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtz(uint8);
+long8 __ovld __cnfn convert_long8_rtp(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtp(uint8);
+long8 __ovld __cnfn convert_long8_rtn(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtn(uint8);
+long8 __ovld __cnfn convert_long8(uint8);
+long8 __ovld __cnfn convert_long8_sat(uint8);
+long8 __ovld __cnfn convert_long8_rte(long8);
+long8 __ovld __cnfn convert_long8_sat_rte(long8);
+long8 __ovld __cnfn convert_long8_rtz(long8);
+long8 __ovld __cnfn convert_long8_sat_rtz(long8);
+long8 __ovld __cnfn convert_long8_rtp(long8);
+long8 __ovld __cnfn convert_long8_sat_rtp(long8);
+long8 __ovld __cnfn convert_long8_rtn(long8);
+long8 __ovld __cnfn convert_long8_sat_rtn(long8);
+long8 __ovld __cnfn convert_long8(long8);
+long8 __ovld __cnfn convert_long8_sat(long8);
+long8 __ovld __cnfn convert_long8_rte(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rte(ulong8);
+long8 __ovld __cnfn convert_long8_rtz(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtz(ulong8);
+long8 __ovld __cnfn convert_long8_rtp(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtp(ulong8);
+long8 __ovld __cnfn convert_long8_rtn(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtn(ulong8);
+long8 __ovld __cnfn convert_long8(ulong8);
+long8 __ovld __cnfn convert_long8_sat(ulong8);
+long8 __ovld __cnfn convert_long8_rte(float8);
+long8 __ovld __cnfn convert_long8_sat_rte(float8);
+long8 __ovld __cnfn convert_long8_rtz(float8);
+long8 __ovld __cnfn convert_long8_sat_rtz(float8);
+long8 __ovld __cnfn convert_long8_rtp(float8);
+long8 __ovld __cnfn convert_long8_sat_rtp(float8);
+long8 __ovld __cnfn convert_long8_rtn(float8);
+long8 __ovld __cnfn convert_long8_sat_rtn(float8);
+long8 __ovld __cnfn convert_long8(float8);
+long8 __ovld __cnfn convert_long8_sat(float8);
+ulong8 __ovld __cnfn convert_ulong8_rte(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8);
+ulong8 __ovld __cnfn convert_ulong8(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat(char8);
+ulong8 __ovld __cnfn convert_ulong8_rte(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8);
+ulong8 __ovld __cnfn convert_ulong8(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rte(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8);
+ulong8 __ovld __cnfn convert_ulong8(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat(short8);
+ulong8 __ovld __cnfn convert_ulong8_rte(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8);
+ulong8 __ovld __cnfn convert_ulong8(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rte(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(int8);
+ulong8 __ovld __cnfn convert_ulong8(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat(int8);
+ulong8 __ovld __cnfn convert_ulong8_rte(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8);
+ulong8 __ovld __cnfn convert_ulong8(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rte(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8);
+ulong8 __ovld __cnfn convert_ulong8(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat(long8);
+ulong8 __ovld __cnfn convert_ulong8_rte(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8);
+ulong8 __ovld __cnfn convert_ulong8(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rte(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8);
+ulong8 __ovld __cnfn convert_ulong8(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat(float8);
+float8 __ovld __cnfn convert_float8_rte(char8);
+float8 __ovld __cnfn convert_float8_rtz(char8);
+float8 __ovld __cnfn convert_float8_rtp(char8);
+float8 __ovld __cnfn convert_float8_rtn(char8);
+float8 __ovld __cnfn convert_float8(char8);
+float8 __ovld __cnfn convert_float8_rte(uchar8);
+float8 __ovld __cnfn convert_float8_rtz(uchar8);
+float8 __ovld __cnfn convert_float8_rtp(uchar8);
+float8 __ovld __cnfn convert_float8_rtn(uchar8);
+float8 __ovld __cnfn convert_float8(uchar8);
+float8 __ovld __cnfn convert_float8_rte(short8);
+float8 __ovld __cnfn convert_float8_rtz(short8);
+float8 __ovld __cnfn convert_float8_rtp(short8);
+float8 __ovld __cnfn convert_float8_rtn(short8);
+float8 __ovld __cnfn convert_float8(short8);
+float8 __ovld __cnfn convert_float8_rte(ushort8);
+float8 __ovld __cnfn convert_float8_rtz(ushort8);
+float8 __ovld __cnfn convert_float8_rtp(ushort8);
+float8 __ovld __cnfn convert_float8_rtn(ushort8);
+float8 __ovld __cnfn convert_float8(ushort8);
+float8 __ovld __cnfn convert_float8_rte(int8);
+float8 __ovld __cnfn convert_float8_rtz(int8);
+float8 __ovld __cnfn convert_float8_rtp(int8);
+float8 __ovld __cnfn convert_float8_rtn(int8);
+float8 __ovld __cnfn convert_float8(int8);
+float8 __ovld __cnfn convert_float8_rte(uint8);
+float8 __ovld __cnfn convert_float8_rtz(uint8);
+float8 __ovld __cnfn convert_float8_rtp(uint8);
+float8 __ovld __cnfn convert_float8_rtn(uint8);
+float8 __ovld __cnfn convert_float8(uint8);
+float8 __ovld __cnfn convert_float8_rte(long8);
+float8 __ovld __cnfn convert_float8_rtz(long8);
+float8 __ovld __cnfn convert_float8_rtp(long8);
+float8 __ovld __cnfn convert_float8_rtn(long8);
+float8 __ovld __cnfn convert_float8(long8);
+float8 __ovld __cnfn convert_float8_rte(ulong8);
+float8 __ovld __cnfn convert_float8_rtz(ulong8);
+float8 __ovld __cnfn convert_float8_rtp(ulong8);
+float8 __ovld __cnfn convert_float8_rtn(ulong8);
+float8 __ovld __cnfn convert_float8(ulong8);
+float8 __ovld __cnfn convert_float8_rte(float8);
+float8 __ovld __cnfn convert_float8_rtz(float8);
+float8 __ovld __cnfn convert_float8_rtp(float8);
+float8 __ovld __cnfn convert_float8_rtn(float8);
+float8 __ovld __cnfn convert_float8(float8);
+char16 __ovld __cnfn convert_char16_rte(char16);
+char16 __ovld __cnfn convert_char16_sat_rte(char16);
+char16 __ovld __cnfn convert_char16_rtz(char16);
+char16 __ovld __cnfn convert_char16_sat_rtz(char16);
+char16 __ovld __cnfn convert_char16_rtp(char16);
+char16 __ovld __cnfn convert_char16_sat_rtp(char16);
+char16 __ovld __cnfn convert_char16_rtn(char16);
+char16 __ovld __cnfn convert_char16_sat_rtn(char16);
+char16 __ovld __cnfn convert_char16(char16);
+char16 __ovld __cnfn convert_char16_sat(char16);
+char16 __ovld __cnfn convert_char16_rte(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rte(uchar16);
+char16 __ovld __cnfn convert_char16_rtz(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtz(uchar16);
+char16 __ovld __cnfn convert_char16_rtp(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtp(uchar16);
+char16 __ovld __cnfn convert_char16_rtn(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtn(uchar16);
+char16 __ovld __cnfn convert_char16(uchar16);
+char16 __ovld __cnfn convert_char16_sat(uchar16);
+char16 __ovld __cnfn convert_char16_rte(short16);
+char16 __ovld __cnfn convert_char16_sat_rte(short16);
+char16 __ovld __cnfn convert_char16_rtz(short16);
+char16 __ovld __cnfn convert_char16_sat_rtz(short16);
+char16 __ovld __cnfn convert_char16_rtp(short16);
+char16 __ovld __cnfn convert_char16_sat_rtp(short16);
+char16 __ovld __cnfn convert_char16_rtn(short16);
+char16 __ovld __cnfn convert_char16_sat_rtn(short16);
+char16 __ovld __cnfn convert_char16(short16);
+char16 __ovld __cnfn convert_char16_sat(short16);
+char16 __ovld __cnfn convert_char16_rte(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rte(ushort16);
+char16 __ovld __cnfn convert_char16_rtz(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtz(ushort16);
+char16 __ovld __cnfn convert_char16_rtp(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtp(ushort16);
+char16 __ovld __cnfn convert_char16_rtn(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtn(ushort16);
+char16 __ovld __cnfn convert_char16(ushort16);
+char16 __ovld __cnfn convert_char16_sat(ushort16);
+char16 __ovld __cnfn convert_char16_rte(int16);
+char16 __ovld __cnfn convert_char16_sat_rte(int16);
+char16 __ovld __cnfn convert_char16_rtz(int16);
+char16 __ovld __cnfn convert_char16_sat_rtz(int16);
+char16 __ovld __cnfn convert_char16_rtp(int16);
+char16 __ovld __cnfn convert_char16_sat_rtp(int16);
+char16 __ovld __cnfn convert_char16_rtn(int16);
+char16 __ovld __cnfn convert_char16_sat_rtn(int16);
+char16 __ovld __cnfn convert_char16(int16);
+char16 __ovld __cnfn convert_char16_sat(int16);
+char16 __ovld __cnfn convert_char16_rte(uint16);
+char16 __ovld __cnfn convert_char16_sat_rte(uint16);
+char16 __ovld __cnfn convert_char16_rtz(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtz(uint16);
+char16 __ovld __cnfn convert_char16_rtp(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtp(uint16);
+char16 __ovld __cnfn convert_char16_rtn(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtn(uint16);
+char16 __ovld __cnfn convert_char16(uint16);
+char16 __ovld __cnfn convert_char16_sat(uint16);
+char16 __ovld __cnfn convert_char16_rte(long16);
+char16 __ovld __cnfn convert_char16_sat_rte(long16);
+char16 __ovld __cnfn convert_char16_rtz(long16);
+char16 __ovld __cnfn convert_char16_sat_rtz(long16);
+char16 __ovld __cnfn convert_char16_rtp(long16);
+char16 __ovld __cnfn convert_char16_sat_rtp(long16);
+char16 __ovld __cnfn convert_char16_rtn(long16);
+char16 __ovld __cnfn convert_char16_sat_rtn(long16);
+char16 __ovld __cnfn convert_char16(long16);
+char16 __ovld __cnfn convert_char16_sat(long16);
+char16 __ovld __cnfn convert_char16_rte(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rte(ulong16);
+char16 __ovld __cnfn convert_char16_rtz(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtz(ulong16);
+char16 __ovld __cnfn convert_char16_rtp(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtp(ulong16);
+char16 __ovld __cnfn convert_char16_rtn(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtn(ulong16);
+char16 __ovld __cnfn convert_char16(ulong16);
+char16 __ovld __cnfn convert_char16_sat(ulong16);
+char16 __ovld __cnfn convert_char16_rte(float16);
+char16 __ovld __cnfn convert_char16_sat_rte(float16);
+char16 __ovld __cnfn convert_char16_rtz(float16);
+char16 __ovld __cnfn convert_char16_sat_rtz(float16);
+char16 __ovld __cnfn convert_char16_rtp(float16);
+char16 __ovld __cnfn convert_char16_sat_rtp(float16);
+char16 __ovld __cnfn convert_char16_rtn(float16);
+char16 __ovld __cnfn convert_char16_sat_rtn(float16);
+char16 __ovld __cnfn convert_char16(float16);
+char16 __ovld __cnfn convert_char16_sat(float16);
+uchar16 __ovld __cnfn convert_uchar16_rte(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16);
+uchar16 __ovld __cnfn convert_uchar16(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat(char16);
+uchar16 __ovld __cnfn convert_uchar16_rte(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16);
+uchar16 __ovld __cnfn convert_uchar16(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rte(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16);
+uchar16 __ovld __cnfn convert_uchar16(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat(short16);
+uchar16 __ovld __cnfn convert_uchar16_rte(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16);
+uchar16 __ovld __cnfn convert_uchar16(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rte(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16);
+uchar16 __ovld __cnfn convert_uchar16(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat(int16);
+uchar16 __ovld __cnfn convert_uchar16_rte(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16);
+uchar16 __ovld __cnfn convert_uchar16(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rte(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16);
+uchar16 __ovld __cnfn convert_uchar16(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat(long16);
+uchar16 __ovld __cnfn convert_uchar16_rte(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ulong16);
+uchar16 __ovld __cnfn convert_uchar16(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rte(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16);
+uchar16 __ovld __cnfn convert_uchar16(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat(float16);
+short16 __ovld __cnfn convert_short16_rte(char16);
+short16 __ovld __cnfn convert_short16_sat_rte(char16);
+short16 __ovld __cnfn convert_short16_rtz(char16);
+short16 __ovld __cnfn convert_short16_sat_rtz(char16);
+short16 __ovld __cnfn convert_short16_rtp(char16);
+short16 __ovld __cnfn convert_short16_sat_rtp(char16);
+short16 __ovld __cnfn convert_short16_rtn(char16);
+short16 __ovld __cnfn convert_short16_sat_rtn(char16);
+short16 __ovld __cnfn convert_short16(char16);
+short16 __ovld __cnfn convert_short16_sat(char16);
+short16 __ovld __cnfn convert_short16_rte(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rte(uchar16);
+short16 __ovld __cnfn convert_short16_rtz(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtz(uchar16);
+short16 __ovld __cnfn convert_short16_rtp(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtp(uchar16);
+short16 __ovld __cnfn convert_short16_rtn(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtn(uchar16);
+short16 __ovld __cnfn convert_short16(uchar16);
+short16 __ovld __cnfn convert_short16_sat(uchar16);
+short16 __ovld __cnfn convert_short16_rte(short16);
+short16 __ovld __cnfn convert_short16_sat_rte(short16);
+short16 __ovld __cnfn convert_short16_rtz(short16);
+short16 __ovld __cnfn convert_short16_sat_rtz(short16);
+short16 __ovld __cnfn convert_short16_rtp(short16);
+short16 __ovld __cnfn convert_short16_sat_rtp(short16);
+short16 __ovld __cnfn convert_short16_rtn(short16);
+short16 __ovld __cnfn convert_short16_sat_rtn(short16);
+short16 __ovld __cnfn convert_short16(short16);
+short16 __ovld __cnfn convert_short16_sat(short16);
+short16 __ovld __cnfn convert_short16_rte(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rte(ushort16);
+short16 __ovld __cnfn convert_short16_rtz(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtz(ushort16);
+short16 __ovld __cnfn convert_short16_rtp(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtp(ushort16);
+short16 __ovld __cnfn convert_short16_rtn(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtn(ushort16);
+short16 __ovld __cnfn convert_short16(ushort16);
+short16 __ovld __cnfn convert_short16_sat(ushort16);
+short16 __ovld __cnfn convert_short16_rte(int16);
+short16 __ovld __cnfn convert_short16_sat_rte(int16);
+short16 __ovld __cnfn convert_short16_rtz(int16);
+short16 __ovld __cnfn convert_short16_sat_rtz(int16);
+short16 __ovld __cnfn convert_short16_rtp(int16);
+short16 __ovld __cnfn convert_short16_sat_rtp(int16);
+short16 __ovld __cnfn convert_short16_rtn(int16);
+short16 __ovld __cnfn convert_short16_sat_rtn(int16);
+short16 __ovld __cnfn convert_short16(int16);
+short16 __ovld __cnfn convert_short16_sat(int16);
+short16 __ovld __cnfn convert_short16_rte(uint16);
+short16 __ovld __cnfn convert_short16_sat_rte(uint16);
+short16 __ovld __cnfn convert_short16_rtz(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtz(uint16);
+short16 __ovld __cnfn convert_short16_rtp(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtp(uint16);
+short16 __ovld __cnfn convert_short16_rtn(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtn(uint16);
+short16 __ovld __cnfn convert_short16(uint16);
+short16 __ovld __cnfn convert_short16_sat(uint16);
+short16 __ovld __cnfn convert_short16_rte(long16);
+short16 __ovld __cnfn convert_short16_sat_rte(long16);
+short16 __ovld __cnfn convert_short16_rtz(long16);
+short16 __ovld __cnfn convert_short16_sat_rtz(long16);
+short16 __ovld __cnfn convert_short16_rtp(long16);
+short16 __ovld __cnfn convert_short16_sat_rtp(long16);
+short16 __ovld __cnfn convert_short16_rtn(long16);
+short16 __ovld __cnfn convert_short16_sat_rtn(long16);
+short16 __ovld __cnfn convert_short16(long16);
+short16 __ovld __cnfn convert_short16_sat(long16);
+short16 __ovld __cnfn convert_short16_rte(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rte(ulong16);
+short16 __ovld __cnfn convert_short16_rtz(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtz(ulong16);
+short16 __ovld __cnfn convert_short16_rtp(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtp(ulong16);
+short16 __ovld __cnfn convert_short16_rtn(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtn(ulong16);
+short16 __ovld __cnfn convert_short16(ulong16);
+short16 __ovld __cnfn convert_short16_sat(ulong16);
+short16 __ovld __cnfn convert_short16_rte(float16);
+short16 __ovld __cnfn convert_short16_sat_rte(float16);
+short16 __ovld __cnfn convert_short16_rtz(float16);
+short16 __ovld __cnfn convert_short16_sat_rtz(float16);
+short16 __ovld __cnfn convert_short16_rtp(float16);
+short16 __ovld __cnfn convert_short16_sat_rtp(float16);
+short16 __ovld __cnfn convert_short16_rtn(float16);
+short16 __ovld __cnfn convert_short16_sat_rtn(float16);
+short16 __ovld __cnfn convert_short16(float16);
+short16 __ovld __cnfn convert_short16_sat(float16);
+ushort16 __ovld __cnfn convert_ushort16_rte(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16);
+ushort16 __ovld __cnfn convert_ushort16(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat(char16);
+ushort16 __ovld __cnfn convert_ushort16_rte(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16);
+ushort16 __ovld __cnfn convert_ushort16(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rte(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16);
+ushort16 __ovld __cnfn convert_ushort16(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat(short16);
+ushort16 __ovld __cnfn convert_ushort16_rte(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16);
+ushort16 __ovld __cnfn convert_ushort16(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rte(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16);
+ushort16 __ovld __cnfn convert_ushort16(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat(int16);
+ushort16 __ovld __cnfn convert_ushort16_rte(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16);
+ushort16 __ovld __cnfn convert_ushort16(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rte(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16);
+ushort16 __ovld __cnfn convert_ushort16(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat(long16);
+ushort16 __ovld __cnfn convert_ushort16_rte(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16);
+ushort16 __ovld __cnfn convert_ushort16(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rte(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16);
+ushort16 __ovld __cnfn convert_ushort16(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat(float16);
+int16 __ovld __cnfn convert_int16_rte(char16);
+int16 __ovld __cnfn convert_int16_sat_rte(char16);
+int16 __ovld __cnfn convert_int16_rtz(char16);
+int16 __ovld __cnfn convert_int16_sat_rtz(char16);
+int16 __ovld __cnfn convert_int16_rtp(char16);
+int16 __ovld __cnfn convert_int16_sat_rtp(char16);
+int16 __ovld __cnfn convert_int16_rtn(char16);
+int16 __ovld __cnfn convert_int16_sat_rtn(char16);
+int16 __ovld __cnfn convert_int16(char16);
+int16 __ovld __cnfn convert_int16_sat(char16);
+int16 __ovld __cnfn convert_int16_rte(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rte(uchar16);
+int16 __ovld __cnfn convert_int16_rtz(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtz(uchar16);
+int16 __ovld __cnfn convert_int16_rtp(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtp(uchar16);
+int16 __ovld __cnfn convert_int16_rtn(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtn(uchar16);
+int16 __ovld __cnfn convert_int16(uchar16);
+int16 __ovld __cnfn convert_int16_sat(uchar16);
+int16 __ovld __cnfn convert_int16_rte(short16);
+int16 __ovld __cnfn convert_int16_sat_rte(short16);
+int16 __ovld __cnfn convert_int16_rtz(short16);
+int16 __ovld __cnfn convert_int16_sat_rtz(short16);
+int16 __ovld __cnfn convert_int16_rtp(short16);
+int16 __ovld __cnfn convert_int16_sat_rtp(short16);
+int16 __ovld __cnfn convert_int16_rtn(short16);
+int16 __ovld __cnfn convert_int16_sat_rtn(short16);
+int16 __ovld __cnfn convert_int16(short16);
+int16 __ovld __cnfn convert_int16_sat(short16);
+int16 __ovld __cnfn convert_int16_rte(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rte(ushort16);
+int16 __ovld __cnfn convert_int16_rtz(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtz(ushort16);
+int16 __ovld __cnfn convert_int16_rtp(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtp(ushort16);
+int16 __ovld __cnfn convert_int16_rtn(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtn(ushort16);
+int16 __ovld __cnfn convert_int16(ushort16);
+int16 __ovld __cnfn convert_int16_sat(ushort16);
+int16 __ovld __cnfn convert_int16_rte(int16);
+int16 __ovld __cnfn convert_int16_sat_rte(int16);
+int16 __ovld __cnfn convert_int16_rtz(int16);
+int16 __ovld __cnfn convert_int16_sat_rtz(int16);
+int16 __ovld __cnfn convert_int16_rtp(int16);
+int16 __ovld __cnfn convert_int16_sat_rtp(int16);
+int16 __ovld __cnfn convert_int16_rtn(int16);
+int16 __ovld __cnfn convert_int16_sat_rtn(int16);
+int16 __ovld __cnfn convert_int16(int16);
+int16 __ovld __cnfn convert_int16_sat(int16);
+int16 __ovld __cnfn convert_int16_rte(uint16);
+int16 __ovld __cnfn convert_int16_sat_rte(uint16);
+int16 __ovld __cnfn convert_int16_rtz(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtz(uint16);
+int16 __ovld __cnfn convert_int16_rtp(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtp(uint16);
+int16 __ovld __cnfn convert_int16_rtn(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtn(uint16);
+int16 __ovld __cnfn convert_int16(uint16);
+int16 __ovld __cnfn convert_int16_sat(uint16);
+int16 __ovld __cnfn convert_int16_rte(long16);
+int16 __ovld __cnfn convert_int16_sat_rte(long16);
+int16 __ovld __cnfn convert_int16_rtz(long16);
+int16 __ovld __cnfn convert_int16_sat_rtz(long16);
+int16 __ovld __cnfn convert_int16_rtp(long16);
+int16 __ovld __cnfn convert_int16_sat_rtp(long16);
+int16 __ovld __cnfn convert_int16_rtn(long16);
+int16 __ovld __cnfn convert_int16_sat_rtn(long16);
+int16 __ovld __cnfn convert_int16(long16);
+int16 __ovld __cnfn convert_int16_sat(long16);
+int16 __ovld __cnfn convert_int16_rte(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rte(ulong16);
+int16 __ovld __cnfn convert_int16_rtz(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtz(ulong16);
+int16 __ovld __cnfn convert_int16_rtp(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtp(ulong16);
+int16 __ovld __cnfn convert_int16_rtn(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtn(ulong16);
+int16 __ovld __cnfn convert_int16(ulong16);
+int16 __ovld __cnfn convert_int16_sat(ulong16);
+int16 __ovld __cnfn convert_int16_rte(float16);
+int16 __ovld __cnfn convert_int16_sat_rte(float16);
+int16 __ovld __cnfn convert_int16_rtz(float16);
+int16 __ovld __cnfn convert_int16_sat_rtz(float16);
+int16 __ovld __cnfn convert_int16_rtp(float16);
+int16 __ovld __cnfn convert_int16_sat_rtp(float16);
+int16 __ovld __cnfn convert_int16_rtn(float16);
+int16 __ovld __cnfn convert_int16_sat_rtn(float16);
+int16 __ovld __cnfn convert_int16(float16);
+int16 __ovld __cnfn convert_int16_sat(float16);
+uint16 __ovld __cnfn convert_uint16_rte(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(char16);
+uint16 __ovld __cnfn convert_uint16_rtz(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(char16);
+uint16 __ovld __cnfn convert_uint16_rtp(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(char16);
+uint16 __ovld __cnfn convert_uint16_rtn(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(char16);
+uint16 __ovld __cnfn convert_uint16(char16);
+uint16 __ovld __cnfn convert_uint16_sat(char16);
+uint16 __ovld __cnfn convert_uint16_rte(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtz(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtp(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtn(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16);
+uint16 __ovld __cnfn convert_uint16(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat(uchar16);
+uint16 __ovld __cnfn convert_uint16_rte(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(short16);
+uint16 __ovld __cnfn convert_uint16_rtz(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(short16);
+uint16 __ovld __cnfn convert_uint16_rtp(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(short16);
+uint16 __ovld __cnfn convert_uint16_rtn(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(short16);
+uint16 __ovld __cnfn convert_uint16(short16);
+uint16 __ovld __cnfn convert_uint16_sat(short16);
+uint16 __ovld __cnfn convert_uint16_rte(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtz(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtp(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtn(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(ushort16);
+uint16 __ovld __cnfn convert_uint16(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat(ushort16);
+uint16 __ovld __cnfn convert_uint16_rte(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(int16);
+uint16 __ovld __cnfn convert_uint16_rtz(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(int16);
+uint16 __ovld __cnfn convert_uint16_rtp(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(int16);
+uint16 __ovld __cnfn convert_uint16_rtn(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(int16);
+uint16 __ovld __cnfn convert_uint16(int16);
+uint16 __ovld __cnfn convert_uint16_sat(int16);
+uint16 __ovld __cnfn convert_uint16_rte(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(uint16);
+uint16 __ovld __cnfn convert_uint16_rtz(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16);
+uint16 __ovld __cnfn convert_uint16_rtp(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16);
+uint16 __ovld __cnfn convert_uint16_rtn(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16);
+uint16 __ovld __cnfn convert_uint16(uint16);
+uint16 __ovld __cnfn convert_uint16_sat(uint16);
+uint16 __ovld __cnfn convert_uint16_rte(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(long16);
+uint16 __ovld __cnfn convert_uint16_rtz(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(long16);
+uint16 __ovld __cnfn convert_uint16_rtp(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(long16);
+uint16 __ovld __cnfn convert_uint16_rtn(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(long16);
+uint16 __ovld __cnfn convert_uint16(long16);
+uint16 __ovld __cnfn convert_uint16_sat(long16);
+uint16 __ovld __cnfn convert_uint16_rte(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtz(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtp(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtn(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16);
+uint16 __ovld __cnfn convert_uint16(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat(ulong16);
+uint16 __ovld __cnfn convert_uint16_rte(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(float16);
+uint16 __ovld __cnfn convert_uint16_rtz(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(float16);
+uint16 __ovld __cnfn convert_uint16_rtp(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(float16);
+uint16 __ovld __cnfn convert_uint16_rtn(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(float16);
+uint16 __ovld __cnfn convert_uint16(float16);
+uint16 __ovld __cnfn convert_uint16_sat(float16);
+long16 __ovld __cnfn convert_long16_rte(char16);
+long16 __ovld __cnfn convert_long16_sat_rte(char16);
+long16 __ovld __cnfn convert_long16_rtz(char16);
+long16 __ovld __cnfn convert_long16_sat_rtz(char16);
+long16 __ovld __cnfn convert_long16_rtp(char16);
+long16 __ovld __cnfn convert_long16_sat_rtp(char16);
+long16 __ovld __cnfn convert_long16_rtn(char16);
+long16 __ovld __cnfn convert_long16_sat_rtn(char16);
+long16 __ovld __cnfn convert_long16(char16);
+long16 __ovld __cnfn convert_long16_sat(char16);
+long16 __ovld __cnfn convert_long16_rte(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rte(uchar16);
+long16 __ovld __cnfn convert_long16_rtz(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtz(uchar16);
+long16 __ovld __cnfn convert_long16_rtp(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtp(uchar16);
+long16 __ovld __cnfn convert_long16_rtn(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtn(uchar16);
+long16 __ovld __cnfn convert_long16(uchar16);
+long16 __ovld __cnfn convert_long16_sat(uchar16);
+long16 __ovld __cnfn convert_long16_rte(short16);
+long16 __ovld __cnfn convert_long16_sat_rte(short16);
+long16 __ovld __cnfn convert_long16_rtz(short16);
+long16 __ovld __cnfn convert_long16_sat_rtz(short16);
+long16 __ovld __cnfn convert_long16_rtp(short16);
+long16 __ovld __cnfn convert_long16_sat_rtp(short16);
+long16 __ovld __cnfn convert_long16_rtn(short16);
+long16 __ovld __cnfn convert_long16_sat_rtn(short16);
+long16 __ovld __cnfn convert_long16(short16);
+long16 __ovld __cnfn convert_long16_sat(short16);
+long16 __ovld __cnfn convert_long16_rte(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rte(ushort16);
+long16 __ovld __cnfn convert_long16_rtz(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtz(ushort16);
+long16 __ovld __cnfn convert_long16_rtp(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtp(ushort16);
+long16 __ovld __cnfn convert_long16_rtn(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtn(ushort16);
+long16 __ovld __cnfn convert_long16(ushort16);
+long16 __ovld __cnfn convert_long16_sat(ushort16);
+long16 __ovld __cnfn convert_long16_rte(int16);
+long16 __ovld __cnfn convert_long16_sat_rte(int16);
+long16 __ovld __cnfn convert_long16_rtz(int16);
+long16 __ovld __cnfn convert_long16_sat_rtz(int16);
+long16 __ovld __cnfn convert_long16_rtp(int16);
+long16 __ovld __cnfn convert_long16_sat_rtp(int16);
+long16 __ovld __cnfn convert_long16_rtn(int16);
+long16 __ovld __cnfn convert_long16_sat_rtn(int16);
+long16 __ovld __cnfn convert_long16(int16);
+long16 __ovld __cnfn convert_long16_sat(int16);
+long16 __ovld __cnfn convert_long16_rte(uint16);
+long16 __ovld __cnfn convert_long16_sat_rte(uint16);
+long16 __ovld __cnfn convert_long16_rtz(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtz(uint16);
+long16 __ovld __cnfn convert_long16_rtp(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtp(uint16);
+long16 __ovld __cnfn convert_long16_rtn(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtn(uint16);
+long16 __ovld __cnfn convert_long16(uint16);
+long16 __ovld __cnfn convert_long16_sat(uint16);
+long16 __ovld __cnfn convert_long16_rte(long16);
+long16 __ovld __cnfn convert_long16_sat_rte(long16);
+long16 __ovld __cnfn convert_long16_rtz(long16);
+long16 __ovld __cnfn convert_long16_sat_rtz(long16);
+long16 __ovld __cnfn convert_long16_rtp(long16);
+long16 __ovld __cnfn convert_long16_sat_rtp(long16);
+long16 __ovld __cnfn convert_long16_rtn(long16);
+long16 __ovld __cnfn convert_long16_sat_rtn(long16);
+long16 __ovld __cnfn convert_long16(long16);
+long16 __ovld __cnfn convert_long16_sat(long16);
+long16 __ovld __cnfn convert_long16_rte(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rte(ulong16);
+long16 __ovld __cnfn convert_long16_rtz(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtz(ulong16);
+long16 __ovld __cnfn convert_long16_rtp(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtp(ulong16);
+long16 __ovld __cnfn convert_long16_rtn(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtn(ulong16);
+long16 __ovld __cnfn convert_long16(ulong16);
+long16 __ovld __cnfn convert_long16_sat(ulong16);
+long16 __ovld __cnfn convert_long16_rte(float16);
+long16 __ovld __cnfn convert_long16_sat_rte(float16);
+long16 __ovld __cnfn convert_long16_rtz(float16);
+long16 __ovld __cnfn convert_long16_sat_rtz(float16);
+long16 __ovld __cnfn convert_long16_rtp(float16);
+long16 __ovld __cnfn convert_long16_sat_rtp(float16);
+long16 __ovld __cnfn convert_long16_rtn(float16);
+long16 __ovld __cnfn convert_long16_sat_rtn(float16);
+long16 __ovld __cnfn convert_long16(float16);
+long16 __ovld __cnfn convert_long16_sat(float16);
+ulong16 __ovld __cnfn convert_ulong16_rte(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16);
+ulong16 __ovld __cnfn convert_ulong16(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat(char16);
+ulong16 __ovld __cnfn convert_ulong16_rte(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16);
+ulong16 __ovld __cnfn convert_ulong16(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rte(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16);
+ulong16 __ovld __cnfn convert_ulong16(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat(short16);
+ulong16 __ovld __cnfn convert_ulong16_rte(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ushort16);
+ulong16 __ovld __cnfn convert_ulong16(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rte(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16);
+ulong16 __ovld __cnfn convert_ulong16(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat(int16);
+ulong16 __ovld __cnfn convert_ulong16_rte(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16);
+ulong16 __ovld __cnfn convert_ulong16(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rte(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16);
+ulong16 __ovld __cnfn convert_ulong16(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat(long16);
+ulong16 __ovld __cnfn convert_ulong16_rte(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16);
+ulong16 __ovld __cnfn convert_ulong16(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rte(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16);
+ulong16 __ovld __cnfn convert_ulong16(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat(float16);
+float16 __ovld __cnfn convert_float16_rte(char16);
+float16 __ovld __cnfn convert_float16_rtz(char16);
+float16 __ovld __cnfn convert_float16_rtp(char16);
+float16 __ovld __cnfn convert_float16_rtn(char16);
+float16 __ovld __cnfn convert_float16(char16);
+float16 __ovld __cnfn convert_float16_rte(uchar16);
+float16 __ovld __cnfn convert_float16_rtz(uchar16);
+float16 __ovld __cnfn convert_float16_rtp(uchar16);
+float16 __ovld __cnfn convert_float16_rtn(uchar16);
+float16 __ovld __cnfn convert_float16(uchar16);
+float16 __ovld __cnfn convert_float16_rte(short16);
+float16 __ovld __cnfn convert_float16_rtz(short16);
+float16 __ovld __cnfn convert_float16_rtp(short16);
+float16 __ovld __cnfn convert_float16_rtn(short16);
+float16 __ovld __cnfn convert_float16(short16);
+float16 __ovld __cnfn convert_float16_rte(ushort16);
+float16 __ovld __cnfn convert_float16_rtz(ushort16);
+float16 __ovld __cnfn convert_float16_rtp(ushort16);
+float16 __ovld __cnfn convert_float16_rtn(ushort16);
+float16 __ovld __cnfn convert_float16(ushort16);
+float16 __ovld __cnfn convert_float16_rte(int16);
+float16 __ovld __cnfn convert_float16_rtz(int16);
+float16 __ovld __cnfn convert_float16_rtp(int16);
+float16 __ovld __cnfn convert_float16_rtn(int16);
+float16 __ovld __cnfn convert_float16(int16);
+float16 __ovld __cnfn convert_float16_rte(uint16);
+float16 __ovld __cnfn convert_float16_rtz(uint16);
+float16 __ovld __cnfn convert_float16_rtp(uint16);
+float16 __ovld __cnfn convert_float16_rtn(uint16);
+float16 __ovld __cnfn convert_float16(uint16);
+float16 __ovld __cnfn convert_float16_rte(long16);
+float16 __ovld __cnfn convert_float16_rtz(long16);
+float16 __ovld __cnfn convert_float16_rtp(long16);
+float16 __ovld __cnfn convert_float16_rtn(long16);
+float16 __ovld __cnfn convert_float16(long16);
+float16 __ovld __cnfn convert_float16_rte(ulong16);
+float16 __ovld __cnfn convert_float16_rtz(ulong16);
+float16 __ovld __cnfn convert_float16_rtp(ulong16);
+float16 __ovld __cnfn convert_float16_rtn(ulong16);
+float16 __ovld __cnfn convert_float16(ulong16);
+float16 __ovld __cnfn convert_float16_rte(float16);
+float16 __ovld __cnfn convert_float16_rtz(float16);
+float16 __ovld __cnfn convert_float16_rtp(float16);
+float16 __ovld __cnfn convert_float16_rtn(float16);
+float16 __ovld __cnfn convert_float16(float16);
+
+// Conversions with double data type parameters or return value.
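+//
+// Illustrative usage (an assumption for documentation purposes, not part of
+// this header): in a kernel built with the cl_khr_fp64 extension enabled, a
+// double vector can be narrowed with an explicit rounding mode and
+// saturation, e.g.
+//
+//   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
+//   __kernel void demo(__global const double4 *in, __global int4 *out) {
+//     size_t i = get_global_id(0);
+//     // Round to nearest even, clamping to the representable range of int.
+//     out[i] = convert_int4_sat_rte(in[i]);
+//   }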
+
+#ifdef cl_khr_fp64
+char __ovld __cnfn convert_char(double);
+char __ovld __cnfn convert_char_rte(double);
+char __ovld __cnfn convert_char_rtn(double);
+char __ovld __cnfn convert_char_rtp(double);
+char __ovld __cnfn convert_char_rtz(double);
+char __ovld __cnfn convert_char_sat(double);
+char __ovld __cnfn convert_char_sat_rte(double);
+char __ovld __cnfn convert_char_sat_rtn(double);
+char __ovld __cnfn convert_char_sat_rtp(double);
+char __ovld __cnfn convert_char_sat_rtz(double);
+char2 __ovld __cnfn convert_char2(double2);
+char2 __ovld __cnfn convert_char2_rte(double2);
+char2 __ovld __cnfn convert_char2_rtn(double2);
+char2 __ovld __cnfn convert_char2_rtp(double2);
+char2 __ovld __cnfn convert_char2_rtz(double2);
+char2 __ovld __cnfn convert_char2_sat(double2);
+char2 __ovld __cnfn convert_char2_sat_rte(double2);
+char2 __ovld __cnfn convert_char2_sat_rtn(double2);
+char2 __ovld __cnfn convert_char2_sat_rtp(double2);
+char2 __ovld __cnfn convert_char2_sat_rtz(double2);
+char3 __ovld __cnfn convert_char3(double3);
+char3 __ovld __cnfn convert_char3_rte(double3);
+char3 __ovld __cnfn convert_char3_rtn(double3);
+char3 __ovld __cnfn convert_char3_rtp(double3);
+char3 __ovld __cnfn convert_char3_rtz(double3);
+char3 __ovld __cnfn convert_char3_sat(double3);
+char3 __ovld __cnfn convert_char3_sat_rte(double3);
+char3 __ovld __cnfn convert_char3_sat_rtn(double3);
+char3 __ovld __cnfn convert_char3_sat_rtp(double3);
+char3 __ovld __cnfn convert_char3_sat_rtz(double3);
+char4 __ovld __cnfn convert_char4(double4);
+char4 __ovld __cnfn convert_char4_rte(double4);
+char4 __ovld __cnfn convert_char4_rtn(double4);
+char4 __ovld __cnfn convert_char4_rtp(double4);
+char4 __ovld __cnfn convert_char4_rtz(double4);
+char4 __ovld __cnfn convert_char4_sat(double4);
+char4 __ovld __cnfn convert_char4_sat_rte(double4);
+char4 __ovld __cnfn convert_char4_sat_rtn(double4);
+char4 __ovld __cnfn convert_char4_sat_rtp(double4);
+char4 __ovld __cnfn convert_char4_sat_rtz(double4);
+char8 __ovld __cnfn convert_char8(double8);
+char8 __ovld __cnfn convert_char8_rte(double8);
+char8 __ovld __cnfn convert_char8_rtn(double8);
+char8 __ovld __cnfn convert_char8_rtp(double8);
+char8 __ovld __cnfn convert_char8_rtz(double8);
+char8 __ovld __cnfn convert_char8_sat(double8);
+char8 __ovld __cnfn convert_char8_sat_rte(double8);
+char8 __ovld __cnfn convert_char8_sat_rtn(double8);
+char8 __ovld __cnfn convert_char8_sat_rtp(double8);
+char8 __ovld __cnfn convert_char8_sat_rtz(double8);
+char16 __ovld __cnfn convert_char16(double16);
+char16 __ovld __cnfn convert_char16_rte(double16);
+char16 __ovld __cnfn convert_char16_rtn(double16);
+char16 __ovld __cnfn convert_char16_rtp(double16);
+char16 __ovld __cnfn convert_char16_rtz(double16);
+char16 __ovld __cnfn convert_char16_sat(double16);
+char16 __ovld __cnfn convert_char16_sat_rte(double16);
+char16 __ovld __cnfn convert_char16_sat_rtn(double16);
+char16 __ovld __cnfn convert_char16_sat_rtp(double16);
+char16 __ovld __cnfn convert_char16_sat_rtz(double16);
+
+uchar __ovld __cnfn convert_uchar(double);
+uchar __ovld __cnfn convert_uchar_rte(double);
+uchar __ovld __cnfn convert_uchar_rtn(double);
+uchar __ovld __cnfn convert_uchar_rtp(double);
+uchar __ovld __cnfn convert_uchar_rtz(double);
+uchar __ovld __cnfn convert_uchar_sat(double);
+uchar __ovld __cnfn convert_uchar_sat_rte(double);
+uchar __ovld __cnfn convert_uchar_sat_rtn(double);
+uchar __ovld __cnfn convert_uchar_sat_rtp(double);
+uchar __ovld __cnfn convert_uchar_sat_rtz(double);
+uchar2 __ovld __cnfn convert_uchar2(double2);
+uchar2 __ovld __cnfn convert_uchar2_rte(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2);
+uchar3 __ovld __cnfn convert_uchar3(double3);
+uchar3 __ovld __cnfn convert_uchar3_rte(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3);
+uchar4 __ovld __cnfn convert_uchar4(double4);
+uchar4 __ovld __cnfn convert_uchar4_rte(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4);
+uchar8 __ovld __cnfn convert_uchar8(double8);
+uchar8 __ovld __cnfn convert_uchar8_rte(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8);
+uchar16 __ovld __cnfn convert_uchar16(double16);
+uchar16 __ovld __cnfn convert_uchar16_rte(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16);
+
+short __ovld __cnfn convert_short(double);
+short __ovld __cnfn convert_short_rte(double);
+short __ovld __cnfn convert_short_rtn(double);
+short __ovld __cnfn convert_short_rtp(double);
+short __ovld __cnfn convert_short_rtz(double);
+short __ovld __cnfn convert_short_sat(double);
+short __ovld __cnfn convert_short_sat_rte(double);
+short __ovld __cnfn convert_short_sat_rtn(double);
+short __ovld __cnfn convert_short_sat_rtp(double);
+short __ovld __cnfn convert_short_sat_rtz(double);
+short2 __ovld __cnfn convert_short2(double2);
+short2 __ovld __cnfn convert_short2_rte(double2);
+short2 __ovld __cnfn convert_short2_rtn(double2);
+short2 __ovld __cnfn convert_short2_rtp(double2);
+short2 __ovld __cnfn convert_short2_rtz(double2);
+short2 __ovld __cnfn convert_short2_sat(double2);
+short2 __ovld __cnfn convert_short2_sat_rte(double2);
+short2 __ovld __cnfn convert_short2_sat_rtn(double2);
+short2 __ovld __cnfn convert_short2_sat_rtp(double2);
+short2 __ovld __cnfn convert_short2_sat_rtz(double2);
+short3 __ovld __cnfn convert_short3(double3);
+short3 __ovld __cnfn convert_short3_rte(double3);
+short3 __ovld __cnfn convert_short3_rtn(double3);
+short3 __ovld __cnfn convert_short3_rtp(double3);
+short3 __ovld __cnfn convert_short3_rtz(double3);
+short3 __ovld __cnfn convert_short3_sat(double3);
+short3 __ovld __cnfn convert_short3_sat_rte(double3);
+short3 __ovld __cnfn convert_short3_sat_rtn(double3);
+short3 __ovld __cnfn convert_short3_sat_rtp(double3);
+short3 __ovld __cnfn convert_short3_sat_rtz(double3);
+short4 __ovld __cnfn convert_short4(double4);
+short4 __ovld __cnfn convert_short4_rte(double4);
+short4 __ovld __cnfn convert_short4_rtn(double4);
+short4 __ovld __cnfn convert_short4_rtp(double4);
+short4 __ovld __cnfn convert_short4_rtz(double4);
+short4 __ovld __cnfn convert_short4_sat(double4);
+short4 __ovld __cnfn convert_short4_sat_rte(double4);
+short4 __ovld __cnfn convert_short4_sat_rtn(double4);
+short4 __ovld __cnfn convert_short4_sat_rtp(double4);
+short4 __ovld __cnfn convert_short4_sat_rtz(double4);
+short8 __ovld __cnfn convert_short8(double8);
+short8 __ovld __cnfn convert_short8_rte(double8);
+short8 __ovld __cnfn convert_short8_rtn(double8);
+short8 __ovld __cnfn convert_short8_rtp(double8);
+short8 __ovld __cnfn convert_short8_rtz(double8);
+short8 __ovld __cnfn convert_short8_sat(double8);
+short8 __ovld __cnfn convert_short8_sat_rte(double8);
+short8 __ovld __cnfn convert_short8_sat_rtn(double8);
+short8 __ovld __cnfn convert_short8_sat_rtp(double8);
+short8 __ovld __cnfn convert_short8_sat_rtz(double8);
+short16 __ovld __cnfn convert_short16(double16);
+short16 __ovld __cnfn convert_short16_rte(double16);
+short16 __ovld __cnfn convert_short16_rtn(double16);
+short16 __ovld __cnfn convert_short16_rtp(double16);
+short16 __ovld __cnfn convert_short16_rtz(double16);
+short16 __ovld __cnfn convert_short16_sat(double16);
+short16 __ovld __cnfn convert_short16_sat_rte(double16);
+short16 __ovld __cnfn convert_short16_sat_rtn(double16);
+short16 __ovld __cnfn convert_short16_sat_rtp(double16);
+short16 __ovld __cnfn convert_short16_sat_rtz(double16);
+
+ushort __ovld __cnfn convert_ushort(double);
+ushort __ovld __cnfn convert_ushort_rte(double);
+ushort __ovld __cnfn convert_ushort_rtn(double);
+ushort __ovld __cnfn convert_ushort_rtp(double);
+ushort __ovld __cnfn convert_ushort_rtz(double);
+ushort __ovld __cnfn convert_ushort_sat(double);
+ushort __ovld __cnfn convert_ushort_sat_rte(double);
+ushort __ovld __cnfn convert_ushort_sat_rtn(double);
+ushort __ovld __cnfn convert_ushort_sat_rtp(double);
+ushort __ovld __cnfn convert_ushort_sat_rtz(double);
+ushort2 __ovld __cnfn convert_ushort2(double2);
+ushort2 __ovld __cnfn convert_ushort2_rte(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2);
+ushort3 __ovld __cnfn convert_ushort3(double3);
+ushort3 __ovld __cnfn convert_ushort3_rte(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3);
+ushort4 __ovld __cnfn convert_ushort4(double4);
+ushort4 __ovld __cnfn convert_ushort4_rte(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4);
+ushort8 __ovld __cnfn convert_ushort8(double8);
+ushort8 __ovld __cnfn convert_ushort8_rte(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8);
+ushort16 __ovld __cnfn convert_ushort16(double16);
+ushort16 __ovld __cnfn convert_ushort16_rte(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16);
+
+int __ovld __cnfn convert_int(double);
+int __ovld __cnfn convert_int_rte(double);
+int __ovld __cnfn convert_int_rtn(double);
+int __ovld __cnfn convert_int_rtp(double);
+int __ovld __cnfn convert_int_rtz(double);
+int __ovld __cnfn convert_int_sat(double);
+int __ovld __cnfn convert_int_sat_rte(double);
+int __ovld __cnfn convert_int_sat_rtn(double);
+int __ovld __cnfn convert_int_sat_rtp(double);
+int __ovld __cnfn convert_int_sat_rtz(double);
+int2 __ovld __cnfn convert_int2(double2);
+int2 __ovld __cnfn convert_int2_rte(double2);
+int2 __ovld __cnfn convert_int2_rtn(double2);
+int2 __ovld __cnfn convert_int2_rtp(double2);
+int2 __ovld __cnfn convert_int2_rtz(double2);
+int2 __ovld __cnfn convert_int2_sat(double2);
+int2 __ovld __cnfn convert_int2_sat_rte(double2);
+int2 __ovld __cnfn convert_int2_sat_rtn(double2);
+int2 __ovld __cnfn convert_int2_sat_rtp(double2);
+int2 __ovld __cnfn convert_int2_sat_rtz(double2);
+int3 __ovld __cnfn convert_int3(double3);
+int3 __ovld __cnfn convert_int3_rte(double3);
+int3 __ovld __cnfn convert_int3_rtn(double3);
+int3 __ovld __cnfn convert_int3_rtp(double3);
+int3 __ovld __cnfn convert_int3_rtz(double3);
+int3 __ovld __cnfn convert_int3_sat(double3);
+int3 __ovld __cnfn convert_int3_sat_rte(double3);
+int3 __ovld __cnfn convert_int3_sat_rtn(double3);
+int3 __ovld __cnfn convert_int3_sat_rtp(double3);
+int3 __ovld __cnfn convert_int3_sat_rtz(double3);
+int4 __ovld __cnfn convert_int4(double4);
+int4 __ovld __cnfn convert_int4_rte(double4);
+int4 __ovld __cnfn convert_int4_rtn(double4);
+int4 __ovld __cnfn convert_int4_rtp(double4);
+int4 __ovld __cnfn convert_int4_rtz(double4);
+int4 __ovld __cnfn convert_int4_sat(double4);
+int4 __ovld __cnfn convert_int4_sat_rte(double4);
+int4 __ovld __cnfn convert_int4_sat_rtn(double4);
+int4 __ovld __cnfn convert_int4_sat_rtp(double4);
+int4 __ovld __cnfn convert_int4_sat_rtz(double4);
+int8 __ovld __cnfn convert_int8(double8);
+int8 __ovld __cnfn convert_int8_rte(double8);
+int8 __ovld __cnfn convert_int8_rtn(double8);
+int8 __ovld __cnfn convert_int8_rtp(double8);
+int8 __ovld __cnfn convert_int8_rtz(double8);
+int8 __ovld __cnfn convert_int8_sat(double8);
+int8 __ovld __cnfn convert_int8_sat_rte(double8);
+int8 __ovld __cnfn convert_int8_sat_rtn(double8);
+int8 __ovld __cnfn convert_int8_sat_rtp(double8);
+int8 __ovld __cnfn convert_int8_sat_rtz(double8);
+int16 __ovld __cnfn convert_int16(double16);
+int16 __ovld __cnfn convert_int16_rte(double16);
+int16 __ovld __cnfn convert_int16_rtn(double16);
+int16 __ovld __cnfn convert_int16_rtp(double16);
+int16 __ovld __cnfn convert_int16_rtz(double16);
+int16 __ovld __cnfn convert_int16_sat(double16);
+int16 __ovld __cnfn convert_int16_sat_rte(double16);
+int16 __ovld __cnfn convert_int16_sat_rtn(double16);
+int16 __ovld __cnfn convert_int16_sat_rtp(double16);
+int16 __ovld __cnfn convert_int16_sat_rtz(double16);
+
+uint __ovld __cnfn convert_uint(double);
+uint __ovld __cnfn convert_uint_rte(double);
+uint __ovld __cnfn convert_uint_rtn(double);
+uint __ovld __cnfn convert_uint_rtp(double);
+uint __ovld __cnfn convert_uint_rtz(double);
+uint __ovld __cnfn convert_uint_sat(double);
+uint __ovld __cnfn convert_uint_sat_rte(double);
+uint __ovld __cnfn convert_uint_sat_rtn(double);
+uint __ovld __cnfn convert_uint_sat_rtp(double);
+uint __ovld __cnfn convert_uint_sat_rtz(double);
+uint2 __ovld __cnfn convert_uint2(double2);
+uint2 __ovld __cnfn convert_uint2_rte(double2);
+uint2 __ovld __cnfn convert_uint2_rtn(double2);
+uint2 __ovld __cnfn convert_uint2_rtp(double2);
+uint2 __ovld __cnfn convert_uint2_rtz(double2);
+uint2 __ovld __cnfn convert_uint2_sat(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(double2);
+uint3 __ovld __cnfn convert_uint3(double3);
+uint3 __ovld __cnfn convert_uint3_rte(double3);
+uint3 __ovld __cnfn convert_uint3_rtn(double3);
+uint3 __ovld __cnfn convert_uint3_rtp(double3);
+uint3 __ovld __cnfn convert_uint3_rtz(double3);
+uint3 __ovld __cnfn convert_uint3_sat(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(double3);
+uint4 __ovld __cnfn convert_uint4(double4);
+uint4 __ovld __cnfn convert_uint4_rte(double4);
+uint4 __ovld __cnfn convert_uint4_rtn(double4);
+uint4 __ovld __cnfn convert_uint4_rtp(double4);
+uint4 __ovld __cnfn convert_uint4_rtz(double4);
+uint4 __ovld __cnfn convert_uint4_sat(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(double4);
+uint8 __ovld __cnfn convert_uint8(double8);
+uint8 __ovld __cnfn convert_uint8_rte(double8);
+uint8 __ovld __cnfn convert_uint8_rtn(double8);
+uint8 __ovld __cnfn convert_uint8_rtp(double8);
+uint8 __ovld __cnfn convert_uint8_rtz(double8);
+uint8 __ovld __cnfn convert_uint8_sat(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(double8);
+uint16 __ovld __cnfn convert_uint16(double16);
+uint16 __ovld __cnfn convert_uint16_rte(double16);
+uint16 __ovld __cnfn convert_uint16_rtn(double16);
+uint16 __ovld __cnfn convert_uint16_rtp(double16);
+uint16 __ovld __cnfn convert_uint16_rtz(double16);
+uint16 __ovld __cnfn convert_uint16_sat(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(double16);
+
+long __ovld __cnfn convert_long(double);
+long __ovld __cnfn convert_long_rte(double);
+long __ovld __cnfn convert_long_rtn(double);
+long __ovld __cnfn convert_long_rtp(double);
+long __ovld __cnfn convert_long_rtz(double);
+long __ovld __cnfn convert_long_sat(double);
+long __ovld __cnfn convert_long_sat_rte(double);
+long __ovld __cnfn convert_long_sat_rtn(double);
+long __ovld __cnfn convert_long_sat_rtp(double);
+long __ovld __cnfn convert_long_sat_rtz(double);
+long2 __ovld __cnfn convert_long2(double2);
+long2 __ovld __cnfn convert_long2_rte(double2);
+long2 __ovld __cnfn convert_long2_rtn(double2);
+long2 __ovld __cnfn convert_long2_rtp(double2);
+long2 __ovld __cnfn convert_long2_rtz(double2);
+long2 __ovld __cnfn convert_long2_sat(double2);
+long2 __ovld __cnfn convert_long2_sat_rte(double2);
+long2 __ovld __cnfn convert_long2_sat_rtn(double2);
+long2 __ovld __cnfn convert_long2_sat_rtp(double2);
+long2 __ovld __cnfn convert_long2_sat_rtz(double2);
+long3 __ovld __cnfn convert_long3(double3);
+long3 __ovld __cnfn convert_long3_rte(double3);
+long3 __ovld __cnfn convert_long3_rtn(double3);
+long3 __ovld __cnfn convert_long3_rtp(double3);
+long3 __ovld __cnfn convert_long3_rtz(double3);
+long3 __ovld __cnfn convert_long3_sat(double3);
+long3 __ovld __cnfn convert_long3_sat_rte(double3);
+long3 __ovld __cnfn convert_long3_sat_rtn(double3);
+long3 __ovld __cnfn convert_long3_sat_rtp(double3);
+long3 __ovld __cnfn convert_long3_sat_rtz(double3);
+long4 __ovld __cnfn convert_long4(double4);
+long4 __ovld __cnfn convert_long4_rte(double4);
+long4 __ovld __cnfn convert_long4_rtn(double4);
+long4 __ovld __cnfn convert_long4_rtp(double4);
+long4 __ovld __cnfn convert_long4_rtz(double4);
+long4 __ovld __cnfn convert_long4_sat(double4);
+long4 __ovld __cnfn convert_long4_sat_rte(double4);
+long4 __ovld __cnfn convert_long4_sat_rtn(double4);
+long4 __ovld __cnfn convert_long4_sat_rtp(double4);
+long4 __ovld __cnfn convert_long4_sat_rtz(double4);
+long8 __ovld __cnfn convert_long8(double8);
+long8 __ovld __cnfn convert_long8_rte(double8);
+long8 __ovld __cnfn convert_long8_rtn(double8);
+long8 __ovld __cnfn convert_long8_rtp(double8);
+long8 __ovld __cnfn convert_long8_rtz(double8);
+long8 __ovld __cnfn convert_long8_sat(double8);
+long8 __ovld __cnfn convert_long8_sat_rte(double8);
+long8 __ovld __cnfn convert_long8_sat_rtn(double8);
+long8 __ovld __cnfn convert_long8_sat_rtp(double8);
+long8 __ovld __cnfn convert_long8_sat_rtz(double8);
+long16 __ovld __cnfn convert_long16(double16);
+long16 __ovld __cnfn convert_long16_rte(double16);
+long16 __ovld __cnfn convert_long16_rtn(double16);
+long16 __ovld __cnfn convert_long16_rtp(double16);
+long16 __ovld __cnfn convert_long16_rtz(double16);
+long16 __ovld __cnfn convert_long16_sat(double16);
+long16 __ovld __cnfn convert_long16_sat_rte(double16);
+long16 __ovld __cnfn convert_long16_sat_rtn(double16);
+long16 __ovld __cnfn convert_long16_sat_rtp(double16);
+long16 __ovld __cnfn convert_long16_sat_rtz(double16);
+
+ulong __ovld __cnfn convert_ulong(double);
+ulong __ovld __cnfn convert_ulong_rte(double);
+ulong __ovld __cnfn convert_ulong_rtn(double);
+ulong __ovld __cnfn convert_ulong_rtp(double);
+ulong __ovld __cnfn convert_ulong_rtz(double);
+ulong __ovld __cnfn convert_ulong_sat(double);
+ulong __ovld __cnfn convert_ulong_sat_rte(double);
+ulong __ovld __cnfn convert_ulong_sat_rtn(double);
+ulong __ovld __cnfn convert_ulong_sat_rtp(double);
+ulong __ovld __cnfn convert_ulong_sat_rtz(double);
+ulong2 __ovld __cnfn convert_ulong2(double2);
+ulong2 __ovld __cnfn convert_ulong2_rte(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2);
+ulong3 __ovld __cnfn convert_ulong3(double3);
+ulong3 __ovld __cnfn convert_ulong3_rte(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3);
+ulong4 __ovld __cnfn convert_ulong4(double4);
+ulong4 __ovld __cnfn convert_ulong4_rte(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4);
+ulong8 __ovld __cnfn convert_ulong8(double8);
+ulong8 __ovld __cnfn convert_ulong8_rte(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8);
+ulong16 __ovld __cnfn convert_ulong16(double16);
+ulong16 __ovld __cnfn convert_ulong16_rte(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16);
+
+float __ovld __cnfn convert_float(double);
+float __ovld __cnfn convert_float_rte(double);
+float __ovld __cnfn convert_float_rtn(double);
+float __ovld __cnfn convert_float_rtp(double);
+float __ovld __cnfn convert_float_rtz(double);
+float2 __ovld __cnfn convert_float2(double2);
+float2 __ovld __cnfn convert_float2_rte(double2);
+float2 __ovld __cnfn convert_float2_rtn(double2);
+float2 __ovld __cnfn convert_float2_rtp(double2);
+float2 __ovld __cnfn convert_float2_rtz(double2);
+float3 __ovld __cnfn convert_float3(double3);
+float3 __ovld __cnfn convert_float3_rte(double3);
+float3 __ovld __cnfn convert_float3_rtn(double3);
+float3 __ovld __cnfn convert_float3_rtp(double3);
+float3 __ovld __cnfn convert_float3_rtz(double3);
+float4 __ovld __cnfn convert_float4(double4);
+float4 __ovld __cnfn convert_float4_rte(double4);
+float4 __ovld __cnfn convert_float4_rtn(double4);
+float4 __ovld __cnfn convert_float4_rtp(double4);
+float4 __ovld __cnfn convert_float4_rtz(double4);
+float8 __ovld __cnfn convert_float8(double8);
+float8 __ovld __cnfn convert_float8_rte(double8);
+float8 __ovld __cnfn convert_float8_rtn(double8);
+float8 __ovld __cnfn convert_float8_rtp(double8);
+float8 __ovld __cnfn convert_float8_rtz(double8);
+float16 __ovld __cnfn convert_float16(double16);
+float16 __ovld __cnfn convert_float16_rte(double16);
+float16 __ovld __cnfn convert_float16_rtn(double16);
+float16 __ovld __cnfn convert_float16_rtp(double16);
+float16 __ovld __cnfn convert_float16_rtz(double16);
+
+double __ovld __cnfn convert_double(char);
+double __ovld __cnfn convert_double(double);
+double __ovld __cnfn convert_double(float);
+double __ovld __cnfn convert_double(int);
+double __ovld __cnfn convert_double(long);
+double __ovld __cnfn convert_double(short);
+double __ovld __cnfn convert_double(uchar);
+double __ovld __cnfn convert_double(uint);
+double __ovld __cnfn convert_double(ulong);
+double __ovld __cnfn convert_double(ushort);
+double __ovld __cnfn convert_double_rte(char);
+double __ovld __cnfn convert_double_rte(double);
+double __ovld __cnfn convert_double_rte(float);
+double __ovld __cnfn convert_double_rte(int);
+double __ovld __cnfn convert_double_rte(long);
+double __ovld __cnfn convert_double_rte(short);
+double __ovld __cnfn convert_double_rte(uchar);
+double __ovld __cnfn convert_double_rte(uint);
+double __ovld __cnfn convert_double_rte(ulong);
+double __ovld __cnfn convert_double_rte(ushort);
+double __ovld __cnfn convert_double_rtn(char);
+double __ovld __cnfn convert_double_rtn(double);
+double __ovld __cnfn convert_double_rtn(float);
+double __ovld __cnfn convert_double_rtn(int);
+double __ovld __cnfn convert_double_rtn(long);
+double __ovld __cnfn convert_double_rtn(short);
+double __ovld __cnfn convert_double_rtn(uchar);
+double __ovld __cnfn convert_double_rtn(uint);
+double __ovld __cnfn convert_double_rtn(ulong);
+double __ovld __cnfn convert_double_rtn(ushort);
+double __ovld __cnfn convert_double_rtp(char);
+double __ovld __cnfn convert_double_rtp(double);
+double __ovld __cnfn convert_double_rtp(float);
+double __ovld __cnfn convert_double_rtp(int);
+double __ovld __cnfn convert_double_rtp(long);
+double __ovld __cnfn convert_double_rtp(short);
+double __ovld __cnfn convert_double_rtp(uchar);
+double __ovld __cnfn convert_double_rtp(uint);
+double __ovld __cnfn convert_double_rtp(ulong);
+double __ovld __cnfn convert_double_rtp(ushort);
+double __ovld __cnfn convert_double_rtz(char);
+double __ovld __cnfn convert_double_rtz(double);
+double __ovld __cnfn convert_double_rtz(float);
+double __ovld __cnfn convert_double_rtz(int);
+double __ovld __cnfn convert_double_rtz(long);
+double __ovld __cnfn convert_double_rtz(short);
+double __ovld __cnfn convert_double_rtz(uchar);
+double __ovld __cnfn convert_double_rtz(uint);
+double __ovld __cnfn convert_double_rtz(ulong);
+double __ovld __cnfn convert_double_rtz(ushort);
+double2 __ovld __cnfn convert_double2(char2);
+double2 __ovld __cnfn convert_double2(double2);
+double2 __ovld __cnfn convert_double2(float2);
+double2 __ovld __cnfn convert_double2(int2);
+double2 __ovld __cnfn convert_double2(long2);
+double2 __ovld __cnfn convert_double2(short2);
+double2 __ovld __cnfn convert_double2(uchar2);
+double2 __ovld __cnfn convert_double2(uint2);
+double2 __ovld __cnfn convert_double2(ulong2);
+double2 __ovld __cnfn convert_double2(ushort2);
+double2 __ovld __cnfn convert_double2_rte(char2);
+double2 __ovld __cnfn convert_double2_rte(double2);
+double2 __ovld __cnfn convert_double2_rte(float2);
+double2 __ovld __cnfn convert_double2_rte(int2);
+double2 __ovld __cnfn convert_double2_rte(long2);
+double2 __ovld __cnfn convert_double2_rte(short2);
+double2 __ovld __cnfn convert_double2_rte(uchar2);
+double2 __ovld __cnfn convert_double2_rte(uint2);
+double2 __ovld __cnfn convert_double2_rte(ulong2);
+double2 __ovld __cnfn convert_double2_rte(ushort2);
+double2 __ovld __cnfn convert_double2_rtn(char2);
+double2 __ovld __cnfn convert_double2_rtn(double2);
+double2 __ovld __cnfn convert_double2_rtn(float2);
+double2 __ovld __cnfn convert_double2_rtn(int2);
+double2 __ovld __cnfn convert_double2_rtn(long2);
+double2 __ovld __cnfn convert_double2_rtn(short2);
+double2 __ovld __cnfn convert_double2_rtn(uchar2);
+double2 __ovld __cnfn convert_double2_rtn(uint2);
+double2 __ovld __cnfn convert_double2_rtn(ulong2);
+double2 __ovld __cnfn convert_double2_rtn(ushort2);
+double2 __ovld __cnfn convert_double2_rtp(char2);
+double2 __ovld __cnfn convert_double2_rtp(double2);
+double2 __ovld __cnfn convert_double2_rtp(float2);
+double2 __ovld __cnfn convert_double2_rtp(int2);
+double2 __ovld __cnfn convert_double2_rtp(long2);
+double2 __ovld __cnfn convert_double2_rtp(short2);
+double2 __ovld __cnfn convert_double2_rtp(uchar2);
+double2 __ovld __cnfn convert_double2_rtp(uint2);
+double2 __ovld __cnfn convert_double2_rtp(ulong2);
+double2 __ovld __cnfn convert_double2_rtp(ushort2);
+double2 __ovld __cnfn convert_double2_rtz(char2);
+double2 __ovld __cnfn convert_double2_rtz(double2);
+double2 __ovld __cnfn convert_double2_rtz(float2);
+double2 __ovld __cnfn convert_double2_rtz(int2);
+double2 __ovld __cnfn convert_double2_rtz(long2);
+double2 __ovld __cnfn convert_double2_rtz(short2);
+double2 __ovld __cnfn convert_double2_rtz(uchar2);
+double2 __ovld __cnfn convert_double2_rtz(uint2);
+double2 __ovld __cnfn convert_double2_rtz(ulong2);
+double2 __ovld __cnfn convert_double2_rtz(ushort2);
+double3 __ovld __cnfn convert_double3(char3);
+double3 __ovld __cnfn convert_double3(double3);
+double3 __ovld __cnfn convert_double3(float3);
+double3 __ovld __cnfn convert_double3(int3);
+double3 __ovld __cnfn convert_double3(long3);
+double3 __ovld __cnfn convert_double3(short3);
+double3 __ovld __cnfn convert_double3(uchar3);
+double3 __ovld __cnfn convert_double3(uint3);
+double3 __ovld __cnfn convert_double3(ulong3);
+double3 __ovld __cnfn convert_double3(ushort3);
+double3 __ovld __cnfn convert_double3_rte(char3);
+double3 __ovld __cnfn convert_double3_rte(double3);
+double3 __ovld __cnfn convert_double3_rte(float3);
+double3 __ovld __cnfn convert_double3_rte(int3);
+double3 __ovld __cnfn convert_double3_rte(long3);
+double3 __ovld __cnfn convert_double3_rte(short3);
+double3 __ovld __cnfn convert_double3_rte(uchar3);
+double3 __ovld __cnfn convert_double3_rte(uint3);
+double3 __ovld __cnfn convert_double3_rte(ulong3);
+double3 __ovld __cnfn convert_double3_rte(ushort3);
+double3 __ovld __cnfn convert_double3_rtn(char3);
+double3 __ovld __cnfn convert_double3_rtn(double3);
+double3 __ovld __cnfn convert_double3_rtn(float3);
+double3 __ovld __cnfn convert_double3_rtn(int3);
+double3 __ovld __cnfn convert_double3_rtn(long3);
+double3 __ovld __cnfn convert_double3_rtn(short3);
+double3 __ovld __cnfn convert_double3_rtn(uchar3);
+double3 __ovld __cnfn convert_double3_rtn(uint3);
+double3 __ovld __cnfn convert_double3_rtn(ulong3);
+double3 __ovld __cnfn convert_double3_rtn(ushort3);
+double3 __ovld __cnfn convert_double3_rtp(char3);
+double3 __ovld __cnfn convert_double3_rtp(double3);
+double3 __ovld __cnfn convert_double3_rtp(float3);
+double3 __ovld __cnfn convert_double3_rtp(int3);
+double3 __ovld __cnfn convert_double3_rtp(long3);
+double3 __ovld __cnfn convert_double3_rtp(short3);
+double3 __ovld __cnfn convert_double3_rtp(uchar3);
+double3 __ovld __cnfn convert_double3_rtp(uint3);
+double3 __ovld __cnfn convert_double3_rtp(ulong3);
+double3 __ovld __cnfn convert_double3_rtp(ushort3);
+double3 __ovld __cnfn convert_double3_rtz(char3);
+double3 __ovld __cnfn convert_double3_rtz(double3);
+double3 __ovld __cnfn convert_double3_rtz(float3);
+double3 __ovld __cnfn convert_double3_rtz(int3);
+double3 __ovld __cnfn convert_double3_rtz(long3);
+double3 __ovld __cnfn convert_double3_rtz(short3);
+double3 __ovld __cnfn convert_double3_rtz(uchar3);
+double3 __ovld __cnfn convert_double3_rtz(uint3);
+double3 __ovld __cnfn convert_double3_rtz(ulong3);
+double3 __ovld __cnfn convert_double3_rtz(ushort3);
+double4 __ovld __cnfn convert_double4(char4);
+double4 __ovld __cnfn convert_double4(double4);
+double4 __ovld __cnfn convert_double4(float4);
+double4 __ovld __cnfn convert_double4(int4);
+double4 __ovld __cnfn convert_double4(long4);
+double4 __ovld __cnfn convert_double4(short4);
+double4 __ovld __cnfn convert_double4(uchar4);
+double4 __ovld __cnfn convert_double4(uint4);
+double4 __ovld __cnfn convert_double4(ulong4);
+double4 __ovld __cnfn convert_double4(ushort4);
+double4 __ovld __cnfn convert_double4_rte(char4);
+double4 __ovld __cnfn convert_double4_rte(double4);
+double4 __ovld __cnfn convert_double4_rte(float4);
+double4 __ovld __cnfn convert_double4_rte(int4);
+double4 __ovld __cnfn convert_double4_rte(long4);
+double4 __ovld __cnfn convert_double4_rte(short4);
+double4 __ovld __cnfn convert_double4_rte(uchar4);
+double4 __ovld __cnfn convert_double4_rte(uint4);
+double4 __ovld __cnfn convert_double4_rte(ulong4);
+double4 __ovld __cnfn convert_double4_rte(ushort4);
+double4 __ovld __cnfn convert_double4_rtn(char4);
+double4 __ovld __cnfn convert_double4_rtn(double4);
+double4 __ovld __cnfn convert_double4_rtn(float4);
+double4 __ovld __cnfn convert_double4_rtn(int4);
+double4 __ovld __cnfn convert_double4_rtn(long4);
+double4 __ovld __cnfn convert_double4_rtn(short4);
+double4 __ovld __cnfn convert_double4_rtn(uchar4);
+double4 __ovld __cnfn convert_double4_rtn(uint4);
+double4 __ovld __cnfn convert_double4_rtn(ulong4);
+double4 __ovld __cnfn convert_double4_rtn(ushort4);
+double4 __ovld __cnfn convert_double4_rtp(char4);
+double4 __ovld __cnfn convert_double4_rtp(double4);
+double4 __ovld __cnfn convert_double4_rtp(float4);
+double4 __ovld __cnfn convert_double4_rtp(int4);
+double4 __ovld __cnfn convert_double4_rtp(long4);
+double4 __ovld __cnfn convert_double4_rtp(short4);
+double4 __ovld __cnfn convert_double4_rtp(uchar4);
+double4 __ovld __cnfn convert_double4_rtp(uint4);
+double4 __ovld __cnfn convert_double4_rtp(ulong4);
+double4 __ovld __cnfn convert_double4_rtp(ushort4);
+double4 __ovld __cnfn convert_double4_rtz(char4);
+double4 __ovld __cnfn convert_double4_rtz(double4);
+double4 __ovld __cnfn convert_double4_rtz(float4);
+double4 __ovld __cnfn convert_double4_rtz(int4);
+double4 __ovld __cnfn convert_double4_rtz(long4);
+double4 __ovld __cnfn convert_double4_rtz(short4);
+double4 __ovld __cnfn convert_double4_rtz(uchar4);
+double4 __ovld __cnfn convert_double4_rtz(uint4);
+double4 __ovld __cnfn convert_double4_rtz(ulong4);
+double4 __ovld __cnfn convert_double4_rtz(ushort4);
+double8 __ovld __cnfn convert_double8(char8);
+double8 __ovld __cnfn convert_double8(double8);
+double8 __ovld __cnfn convert_double8(float8);
+double8 __ovld __cnfn convert_double8(int8);
+double8 __ovld __cnfn convert_double8(long8);
+double8 __ovld __cnfn convert_double8(short8);
+double8 __ovld __cnfn convert_double8(uchar8);
+double8 __ovld __cnfn convert_double8(uint8);
+double8 __ovld __cnfn convert_double8(ulong8);
+double8 __ovld __cnfn convert_double8(ushort8);
+double8 __ovld __cnfn convert_double8_rte(char8);
+double8 __ovld __cnfn convert_double8_rte(double8);
+double8 __ovld __cnfn convert_double8_rte(float8);
+double8 __ovld __cnfn convert_double8_rte(int8);
+double8 __ovld __cnfn convert_double8_rte(long8);
+double8 __ovld __cnfn convert_double8_rte(short8);
+double8 __ovld __cnfn convert_double8_rte(uchar8);
+double8 __ovld __cnfn convert_double8_rte(uint8);
+double8 __ovld __cnfn convert_double8_rte(ulong8);
+double8 __ovld __cnfn convert_double8_rte(ushort8);
+double8 __ovld __cnfn convert_double8_rtn(char8);
+double8 __ovld __cnfn convert_double8_rtn(double8);
+double8 __ovld __cnfn convert_double8_rtn(float8);
+double8 __ovld __cnfn convert_double8_rtn(int8);
+double8 __ovld __cnfn convert_double8_rtn(long8);
+double8 __ovld __cnfn convert_double8_rtn(short8);
+double8 __ovld __cnfn convert_double8_rtn(uchar8);
+double8 __ovld __cnfn convert_double8_rtn(uint8);
+double8 __ovld __cnfn convert_double8_rtn(ulong8);
+double8 __ovld __cnfn convert_double8_rtn(ushort8);
+double8 __ovld __cnfn convert_double8_rtp(char8);
+double8 __ovld __cnfn convert_double8_rtp(double8);
+double8 __ovld __cnfn convert_double8_rtp(float8);
+double8 __ovld __cnfn convert_double8_rtp(int8);
+double8 __ovld __cnfn convert_double8_rtp(long8);
+double8 __ovld __cnfn convert_double8_rtp(short8);
+double8 __ovld __cnfn convert_double8_rtp(uchar8);
+double8 __ovld __cnfn convert_double8_rtp(uint8);
+double8 __ovld __cnfn convert_double8_rtp(ulong8);
+double8 __ovld __cnfn convert_double8_rtp(ushort8);
+double8 __ovld __cnfn convert_double8_rtz(char8);
+double8 __ovld __cnfn convert_double8_rtz(double8);
+double8 __ovld __cnfn convert_double8_rtz(float8);
+double8 __ovld __cnfn convert_double8_rtz(int8);
+double8 __ovld __cnfn convert_double8_rtz(long8);
+double8 __ovld __cnfn convert_double8_rtz(short8);
+double8 __ovld __cnfn convert_double8_rtz(uchar8);
+double8 __ovld __cnfn convert_double8_rtz(uint8);
+double8 __ovld __cnfn convert_double8_rtz(ulong8);
+double8 __ovld __cnfn convert_double8_rtz(ushort8);
+double16 __ovld __cnfn convert_double16(char16);
+double16 __ovld __cnfn convert_double16(double16);
+double16 __ovld __cnfn convert_double16(float16);
+double16 __ovld __cnfn convert_double16(int16);
+double16 __ovld __cnfn convert_double16(long16);
+double16 __ovld __cnfn convert_double16(short16);
+double16 __ovld __cnfn convert_double16(uchar16);
+double16 __ovld __cnfn convert_double16(uint16);
+double16 __ovld __cnfn convert_double16(ulong16);
+double16 __ovld __cnfn convert_double16(ushort16);
+double16 __ovld __cnfn convert_double16_rte(char16);
+double16 __ovld __cnfn convert_double16_rte(double16);
+double16 __ovld __cnfn convert_double16_rte(float16);
+double16 __ovld __cnfn convert_double16_rte(int16);
+double16 __ovld __cnfn convert_double16_rte(long16);
+double16 __ovld __cnfn convert_double16_rte(short16);
+double16 __ovld __cnfn convert_double16_rte(uchar16);
+double16 __ovld __cnfn convert_double16_rte(uint16);
+double16 __ovld __cnfn convert_double16_rte(ulong16);
+double16 __ovld __cnfn convert_double16_rte(ushort16);
+double16 __ovld __cnfn convert_double16_rtn(char16);
+double16 __ovld __cnfn convert_double16_rtn(double16);
+double16 __ovld __cnfn convert_double16_rtn(float16);
+double16 __ovld __cnfn convert_double16_rtn(int16);
+double16 __ovld __cnfn convert_double16_rtn(long16);
+double16 __ovld __cnfn convert_double16_rtn(short16);
+double16 __ovld __cnfn convert_double16_rtn(uchar16);
+double16 __ovld __cnfn convert_double16_rtn(uint16);
+double16 __ovld __cnfn convert_double16_rtn(ulong16);
+double16 __ovld __cnfn convert_double16_rtn(ushort16);
+double16 __ovld __cnfn convert_double16_rtp(char16);
+double16 __ovld __cnfn convert_double16_rtp(double16);
+double16 __ovld __cnfn convert_double16_rtp(float16);
+double16 __ovld __cnfn convert_double16_rtp(int16);
+double16 __ovld __cnfn convert_double16_rtp(long16);
+double16 __ovld __cnfn convert_double16_rtp(short16);
+double16 __ovld __cnfn convert_double16_rtp(uchar16);
+double16 __ovld __cnfn convert_double16_rtp(uint16);
+double16 __ovld __cnfn convert_double16_rtp(ulong16);
+double16 __ovld __cnfn convert_double16_rtp(ushort16);
+double16 __ovld __cnfn convert_double16_rtz(char16);
+double16 __ovld __cnfn convert_double16_rtz(double16);
+double16 __ovld __cnfn convert_double16_rtz(float16);
+double16 __ovld __cnfn convert_double16_rtz(int16);
+double16 __ovld __cnfn convert_double16_rtz(long16);
+double16 __ovld __cnfn convert_double16_rtz(short16);
+double16 __ovld __cnfn convert_double16_rtz(uchar16);
+double16 __ovld __cnfn convert_double16_rtz(uint16);
+double16 __ovld __cnfn convert_double16_rtz(ulong16);
+double16 __ovld __cnfn convert_double16_rtz(ushort16);
+#endif //cl_khr_fp64
+
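+// Illustrative note (editor's sketch, not upstream text): the conversion
+// builtins follow the naming scheme convert_<destType><N>[_sat][_<rounding>],
+// where the optional _sat suffix clamps out-of-range values and the rounding
+// suffix is one of _rte (to nearest even), _rtz (toward zero), _rtp (toward
+// +infinity) or _rtn (toward -infinity). For example, given a hypothetical
+// float4 f, convert_int4_sat_rte(f) converts it to an int4 with saturation,
+// rounding to nearest even.
+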
+#ifdef cl_khr_fp16
+// Convert half types to non-double types.
+uchar __ovld __cnfn convert_uchar(half);
+uchar __ovld __cnfn convert_uchar_rte(half);
+uchar __ovld __cnfn convert_uchar_rtp(half);
+uchar __ovld __cnfn convert_uchar_rtn(half);
+uchar __ovld __cnfn convert_uchar_rtz(half);
+uchar __ovld __cnfn convert_uchar_sat(half);
+uchar __ovld __cnfn convert_uchar_sat_rte(half);
+uchar __ovld __cnfn convert_uchar_sat_rtp(half);
+uchar __ovld __cnfn convert_uchar_sat_rtn(half);
+uchar __ovld __cnfn convert_uchar_sat_rtz(half);
+uchar2 __ovld __cnfn convert_uchar2(half2);
+uchar2 __ovld __cnfn convert_uchar2_rte(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2);
+uchar3 __ovld __cnfn convert_uchar3(half3);
+uchar3 __ovld __cnfn convert_uchar3_rte(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3);
+uchar4 __ovld __cnfn convert_uchar4(half4);
+uchar4 __ovld __cnfn convert_uchar4_rte(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4);
+uchar8 __ovld __cnfn convert_uchar8(half8);
+uchar8 __ovld __cnfn convert_uchar8_rte(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8);
+uchar16 __ovld __cnfn convert_uchar16(half16);
+uchar16 __ovld __cnfn convert_uchar16_rte(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16);
+ushort __ovld __cnfn convert_ushort(half);
+ushort __ovld __cnfn convert_ushort_rte(half);
+ushort __ovld __cnfn convert_ushort_rtp(half);
+ushort __ovld __cnfn convert_ushort_rtn(half);
+ushort __ovld __cnfn convert_ushort_rtz(half);
+ushort __ovld __cnfn convert_ushort_sat(half);
+ushort __ovld __cnfn convert_ushort_sat_rte(half);
+ushort __ovld __cnfn convert_ushort_sat_rtp(half);
+ushort __ovld __cnfn convert_ushort_sat_rtn(half);
+ushort __ovld __cnfn convert_ushort_sat_rtz(half);
+ushort2 __ovld __cnfn convert_ushort2(half2);
+ushort2 __ovld __cnfn convert_ushort2_rte(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2);
+ushort3 __ovld __cnfn convert_ushort3(half3);
+ushort3 __ovld __cnfn convert_ushort3_rte(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3);
+ushort4 __ovld __cnfn convert_ushort4(half4);
+ushort4 __ovld __cnfn convert_ushort4_rte(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4);
+ushort8 __ovld __cnfn convert_ushort8(half8);
+ushort8 __ovld __cnfn convert_ushort8_rte(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8);
+ushort16 __ovld __cnfn convert_ushort16(half16);
+ushort16 __ovld __cnfn convert_ushort16_rte(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16);
+uint __ovld __cnfn convert_uint(half);
+uint __ovld __cnfn convert_uint_rte(half);
+uint __ovld __cnfn convert_uint_rtp(half);
+uint __ovld __cnfn convert_uint_rtn(half);
+uint __ovld __cnfn convert_uint_rtz(half);
+uint __ovld __cnfn convert_uint_sat(half);
+uint __ovld __cnfn convert_uint_sat_rte(half);
+uint __ovld __cnfn convert_uint_sat_rtp(half);
+uint __ovld __cnfn convert_uint_sat_rtn(half);
+uint __ovld __cnfn convert_uint_sat_rtz(half);
+uint2 __ovld __cnfn convert_uint2(half2);
+uint2 __ovld __cnfn convert_uint2_rte(half2);
+uint2 __ovld __cnfn convert_uint2_rtp(half2);
+uint2 __ovld __cnfn convert_uint2_rtn(half2);
+uint2 __ovld __cnfn convert_uint2_rtz(half2);
+uint2 __ovld __cnfn convert_uint2_sat(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(half2);
+uint3 __ovld __cnfn convert_uint3(half3);
+uint3 __ovld __cnfn convert_uint3_rte(half3);
+uint3 __ovld __cnfn convert_uint3_rtp(half3);
+uint3 __ovld __cnfn convert_uint3_rtn(half3);
+uint3 __ovld __cnfn convert_uint3_rtz(half3);
+uint3 __ovld __cnfn convert_uint3_sat(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(half3);
+uint4 __ovld __cnfn convert_uint4(half4);
+uint4 __ovld __cnfn convert_uint4_rte(half4);
+uint4 __ovld __cnfn convert_uint4_rtp(half4);
+uint4 __ovld __cnfn convert_uint4_rtn(half4);
+uint4 __ovld __cnfn convert_uint4_rtz(half4);
+uint4 __ovld __cnfn convert_uint4_sat(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(half4);
+uint8 __ovld __cnfn convert_uint8(half8);
+uint8 __ovld __cnfn convert_uint8_rte(half8);
+uint8 __ovld __cnfn convert_uint8_rtp(half8);
+uint8 __ovld __cnfn convert_uint8_rtn(half8);
+uint8 __ovld __cnfn convert_uint8_rtz(half8);
+uint8 __ovld __cnfn convert_uint8_sat(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(half8);
+uint16 __ovld __cnfn convert_uint16(half16);
+uint16 __ovld __cnfn convert_uint16_rte(half16);
+uint16 __ovld __cnfn convert_uint16_rtp(half16);
+uint16 __ovld __cnfn convert_uint16_rtn(half16);
+uint16 __ovld __cnfn convert_uint16_rtz(half16);
+uint16 __ovld __cnfn convert_uint16_sat(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(half16);
+ulong __ovld __cnfn convert_ulong(half);
+ulong __ovld __cnfn convert_ulong_rte(half);
+ulong __ovld __cnfn convert_ulong_rtp(half);
+ulong __ovld __cnfn convert_ulong_rtn(half);
+ulong __ovld __cnfn convert_ulong_rtz(half);
+ulong __ovld __cnfn convert_ulong_sat(half);
+ulong __ovld __cnfn convert_ulong_sat_rte(half);
+ulong __ovld __cnfn convert_ulong_sat_rtp(half);
+ulong __ovld __cnfn convert_ulong_sat_rtn(half);
+ulong __ovld __cnfn convert_ulong_sat_rtz(half);
+ulong2 __ovld __cnfn convert_ulong2(half2);
+ulong2 __ovld __cnfn convert_ulong2_rte(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2);
+ulong3 __ovld __cnfn convert_ulong3(half3);
+ulong3 __ovld __cnfn convert_ulong3_rte(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3);
+ulong4 __ovld __cnfn convert_ulong4(half4);
+ulong4 __ovld __cnfn convert_ulong4_rte(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4);
+ulong8 __ovld __cnfn convert_ulong8(half8);
+ulong8 __ovld __cnfn convert_ulong8_rte(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8);
+ulong16 __ovld __cnfn convert_ulong16(half16);
+ulong16 __ovld __cnfn convert_ulong16_rte(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16);
+char __ovld __cnfn convert_char(half);
+char __ovld __cnfn convert_char_rte(half);
+char __ovld __cnfn convert_char_rtp(half);
+char __ovld __cnfn convert_char_rtn(half);
+char __ovld __cnfn convert_char_rtz(half);
+char __ovld __cnfn convert_char_sat(half);
+char __ovld __cnfn convert_char_sat_rte(half);
+char __ovld __cnfn convert_char_sat_rtp(half);
+char __ovld __cnfn convert_char_sat_rtn(half);
+char __ovld __cnfn convert_char_sat_rtz(half);
+char2 __ovld __cnfn convert_char2(half2);
+char2 __ovld __cnfn convert_char2_rte(half2);
+char2 __ovld __cnfn convert_char2_rtp(half2);
+char2 __ovld __cnfn convert_char2_rtn(half2);
+char2 __ovld __cnfn convert_char2_rtz(half2);
+char2 __ovld __cnfn convert_char2_sat(half2);
+char2 __ovld __cnfn convert_char2_sat_rte(half2);
+char2 __ovld __cnfn convert_char2_sat_rtp(half2);
+char2 __ovld __cnfn convert_char2_sat_rtn(half2);
+char2 __ovld __cnfn convert_char2_sat_rtz(half2);
+char3 __ovld __cnfn convert_char3(half3);
+char3 __ovld __cnfn convert_char3_rte(half3);
+char3 __ovld __cnfn convert_char3_rtp(half3);
+char3 __ovld __cnfn convert_char3_rtn(half3);
+char3 __ovld __cnfn convert_char3_rtz(half3);
+char3 __ovld __cnfn convert_char3_sat(half3);
+char3 __ovld __cnfn convert_char3_sat_rte(half3);
+char3 __ovld __cnfn convert_char3_sat_rtp(half3);
+char3 __ovld __cnfn convert_char3_sat_rtn(half3);
+char3 __ovld __cnfn convert_char3_sat_rtz(half3);
+char4 __ovld __cnfn convert_char4(half4);
+char4 __ovld __cnfn convert_char4_rte(half4);
+char4 __ovld __cnfn convert_char4_rtp(half4);
+char4 __ovld __cnfn convert_char4_rtn(half4);
+char4 __ovld __cnfn convert_char4_rtz(half4);
+char4 __ovld __cnfn convert_char4_sat(half4);
+char4 __ovld __cnfn convert_char4_sat_rte(half4);
+char4 __ovld __cnfn convert_char4_sat_rtp(half4);
+char4 __ovld __cnfn convert_char4_sat_rtn(half4);
+char4 __ovld __cnfn convert_char4_sat_rtz(half4);
+char8 __ovld __cnfn convert_char8(half8);
+char8 __ovld __cnfn convert_char8_rte(half8);
+char8 __ovld __cnfn convert_char8_rtp(half8);
+char8 __ovld __cnfn convert_char8_rtn(half8);
+char8 __ovld __cnfn convert_char8_rtz(half8);
+char8 __ovld __cnfn convert_char8_sat(half8);
+char8 __ovld __cnfn convert_char8_sat_rte(half8);
+char8 __ovld __cnfn convert_char8_sat_rtp(half8);
+char8 __ovld __cnfn convert_char8_sat_rtn(half8);
+char8 __ovld __cnfn convert_char8_sat_rtz(half8);
+char16 __ovld __cnfn convert_char16(half16);
+char16 __ovld __cnfn convert_char16_rte(half16);
+char16 __ovld __cnfn convert_char16_rtp(half16);
+char16 __ovld __cnfn convert_char16_rtn(half16);
+char16 __ovld __cnfn convert_char16_rtz(half16);
+char16 __ovld __cnfn convert_char16_sat(half16);
+char16 __ovld __cnfn convert_char16_sat_rte(half16);
+char16 __ovld __cnfn convert_char16_sat_rtp(half16);
+char16 __ovld __cnfn convert_char16_sat_rtn(half16);
+char16 __ovld __cnfn convert_char16_sat_rtz(half16);
+short __ovld __cnfn convert_short(half);
+short __ovld __cnfn convert_short_rte(half);
+short __ovld __cnfn convert_short_rtp(half);
+short __ovld __cnfn convert_short_rtn(half);
+short __ovld __cnfn convert_short_rtz(half);
+short __ovld __cnfn convert_short_sat(half);
+short __ovld __cnfn convert_short_sat_rte(half);
+short __ovld __cnfn convert_short_sat_rtp(half);
+short __ovld __cnfn convert_short_sat_rtn(half);
+short __ovld __cnfn convert_short_sat_rtz(half);
+short2 __ovld __cnfn convert_short2(half2);
+short2 __ovld __cnfn convert_short2_rte(half2);
+short2 __ovld __cnfn convert_short2_rtp(half2);
+short2 __ovld __cnfn convert_short2_rtn(half2);
+short2 __ovld __cnfn convert_short2_rtz(half2);
+short2 __ovld __cnfn convert_short2_sat(half2);
+short2 __ovld __cnfn convert_short2_sat_rte(half2);
+short2 __ovld __cnfn convert_short2_sat_rtp(half2);
+short2 __ovld __cnfn convert_short2_sat_rtn(half2);
+short2 __ovld __cnfn convert_short2_sat_rtz(half2);
+short3 __ovld __cnfn convert_short3(half3);
+short3 __ovld __cnfn convert_short3_rte(half3);
+short3 __ovld __cnfn convert_short3_rtp(half3);
+short3 __ovld __cnfn convert_short3_rtn(half3);
+short3 __ovld __cnfn convert_short3_rtz(half3);
+short3 __ovld __cnfn convert_short3_sat(half3);
+short3 __ovld __cnfn convert_short3_sat_rte(half3);
+short3 __ovld __cnfn convert_short3_sat_rtp(half3);
+short3 __ovld __cnfn convert_short3_sat_rtn(half3);
+short3 __ovld __cnfn convert_short3_sat_rtz(half3);
+short4 __ovld __cnfn convert_short4(half4);
+short4 __ovld __cnfn convert_short4_rte(half4);
+short4 __ovld __cnfn convert_short4_rtp(half4);
+short4 __ovld __cnfn convert_short4_rtn(half4);
+short4 __ovld __cnfn convert_short4_rtz(half4);
+short4 __ovld __cnfn convert_short4_sat(half4);
+short4 __ovld __cnfn convert_short4_sat_rte(half4);
+short4 __ovld __cnfn convert_short4_sat_rtp(half4);
+short4 __ovld __cnfn convert_short4_sat_rtn(half4);
+short4 __ovld __cnfn convert_short4_sat_rtz(half4);
+short8 __ovld __cnfn convert_short8(half8);
+short8 __ovld __cnfn convert_short8_rte(half8);
+short8 __ovld __cnfn convert_short8_rtp(half8);
+short8 __ovld __cnfn convert_short8_rtn(half8);
+short8 __ovld __cnfn convert_short8_rtz(half8);
+short8 __ovld __cnfn convert_short8_sat(half8);
+short8 __ovld __cnfn convert_short8_sat_rte(half8);
+short8 __ovld __cnfn convert_short8_sat_rtp(half8);
+short8 __ovld __cnfn convert_short8_sat_rtn(half8);
+short8 __ovld __cnfn convert_short8_sat_rtz(half8);
+short16 __ovld __cnfn convert_short16(half16);
+short16 __ovld __cnfn convert_short16_rte(half16);
+short16 __ovld __cnfn convert_short16_rtp(half16);
+short16 __ovld __cnfn convert_short16_rtn(half16);
+short16 __ovld __cnfn convert_short16_rtz(half16);
+short16 __ovld __cnfn convert_short16_sat(half16);
+short16 __ovld __cnfn convert_short16_sat_rte(half16);
+short16 __ovld __cnfn convert_short16_sat_rtp(half16);
+short16 __ovld __cnfn convert_short16_sat_rtn(half16);
+short16 __ovld __cnfn convert_short16_sat_rtz(half16);
+int __ovld __cnfn convert_int(half);
+int __ovld __cnfn convert_int_rte(half);
+int __ovld __cnfn convert_int_rtp(half);
+int __ovld __cnfn convert_int_rtn(half);
+int __ovld __cnfn convert_int_rtz(half);
+int __ovld __cnfn convert_int_sat(half);
+int __ovld __cnfn convert_int_sat_rte(half);
+int __ovld __cnfn convert_int_sat_rtp(half);
+int __ovld __cnfn convert_int_sat_rtn(half);
+int __ovld __cnfn convert_int_sat_rtz(half);
+int2 __ovld __cnfn convert_int2(half2);
+int2 __ovld __cnfn convert_int2_rte(half2);
+int2 __ovld __cnfn convert_int2_rtp(half2);
+int2 __ovld __cnfn convert_int2_rtn(half2);
+int2 __ovld __cnfn convert_int2_rtz(half2);
+int2 __ovld __cnfn convert_int2_sat(half2);
+int2 __ovld __cnfn convert_int2_sat_rte(half2);
+int2 __ovld __cnfn convert_int2_sat_rtp(half2);
+int2 __ovld __cnfn convert_int2_sat_rtn(half2);
+int2 __ovld __cnfn convert_int2_sat_rtz(half2);
+int3 __ovld __cnfn convert_int3(half3);
+int3 __ovld __cnfn convert_int3_rte(half3);
+int3 __ovld __cnfn convert_int3_rtp(half3);
+int3 __ovld __cnfn convert_int3_rtn(half3);
+int3 __ovld __cnfn convert_int3_rtz(half3);
+int3 __ovld __cnfn convert_int3_sat(half3);
+int3 __ovld __cnfn convert_int3_sat_rte(half3);
+int3 __ovld __cnfn convert_int3_sat_rtp(half3);
+int3 __ovld __cnfn convert_int3_sat_rtn(half3);
+int3 __ovld __cnfn convert_int3_sat_rtz(half3);
+int4 __ovld __cnfn convert_int4(half4);
+int4 __ovld __cnfn convert_int4_rte(half4);
+int4 __ovld __cnfn convert_int4_rtp(half4);
+int4 __ovld __cnfn convert_int4_rtn(half4);
+int4 __ovld __cnfn convert_int4_rtz(half4);
+int4 __ovld __cnfn convert_int4_sat(half4);
+int4 __ovld __cnfn convert_int4_sat_rte(half4);
+int4 __ovld __cnfn convert_int4_sat_rtp(half4);
+int4 __ovld __cnfn convert_int4_sat_rtn(half4);
+int4 __ovld __cnfn convert_int4_sat_rtz(half4);
+int8 __ovld __cnfn convert_int8(half8);
+int8 __ovld __cnfn convert_int8_rte(half8);
+int8 __ovld __cnfn convert_int8_rtp(half8);
+int8 __ovld __cnfn convert_int8_rtn(half8);
+int8 __ovld __cnfn convert_int8_rtz(half8);
+int8 __ovld __cnfn convert_int8_sat(half8);
+int8 __ovld __cnfn convert_int8_sat_rte(half8);
+int8 __ovld __cnfn convert_int8_sat_rtp(half8);
+int8 __ovld __cnfn convert_int8_sat_rtn(half8);
+int8 __ovld __cnfn convert_int8_sat_rtz(half8);
+int16 __ovld __cnfn convert_int16(half16);
+int16 __ovld __cnfn convert_int16_rte(half16);
+int16 __ovld __cnfn convert_int16_rtp(half16);
+int16 __ovld __cnfn convert_int16_rtn(half16);
+int16 __ovld __cnfn convert_int16_rtz(half16);
+int16 __ovld __cnfn convert_int16_sat(half16);
+int16 __ovld __cnfn convert_int16_sat_rte(half16);
+int16 __ovld __cnfn convert_int16_sat_rtp(half16);
+int16 __ovld __cnfn convert_int16_sat_rtn(half16);
+int16 __ovld __cnfn convert_int16_sat_rtz(half16);
+long __ovld __cnfn convert_long(half);
+long __ovld __cnfn convert_long_rte(half);
+long __ovld __cnfn convert_long_rtp(half);
+long __ovld __cnfn convert_long_rtn(half);
+long __ovld __cnfn convert_long_rtz(half);
+long __ovld __cnfn convert_long_sat(half);
+long __ovld __cnfn convert_long_sat_rte(half);
+long __ovld __cnfn convert_long_sat_rtp(half);
+long __ovld __cnfn convert_long_sat_rtn(half);
+long __ovld __cnfn convert_long_sat_rtz(half);
+long2 __ovld __cnfn convert_long2(half2);
+long2 __ovld __cnfn convert_long2_rte(half2);
+long2 __ovld __cnfn convert_long2_rtp(half2);
+long2 __ovld __cnfn convert_long2_rtn(half2);
+long2 __ovld __cnfn convert_long2_rtz(half2);
+long2 __ovld __cnfn convert_long2_sat(half2);
+long2 __ovld __cnfn convert_long2_sat_rte(half2);
+long2 __ovld __cnfn convert_long2_sat_rtp(half2);
+long2 __ovld __cnfn convert_long2_sat_rtn(half2);
+long2 __ovld __cnfn convert_long2_sat_rtz(half2);
+long3 __ovld __cnfn convert_long3(half3);
+long3 __ovld __cnfn convert_long3_rte(half3);
+long3 __ovld __cnfn convert_long3_rtp(half3);
+long3 __ovld __cnfn convert_long3_rtn(half3);
+long3 __ovld __cnfn convert_long3_rtz(half3);
+long3 __ovld __cnfn convert_long3_sat(half3);
+long3 __ovld __cnfn convert_long3_sat_rte(half3);
+long3 __ovld __cnfn convert_long3_sat_rtp(half3);
+long3 __ovld __cnfn convert_long3_sat_rtn(half3);
+long3 __ovld __cnfn convert_long3_sat_rtz(half3);
+long4 __ovld __cnfn convert_long4(half4);
+long4 __ovld __cnfn convert_long4_rte(half4);
+long4 __ovld __cnfn convert_long4_rtp(half4);
+long4 __ovld __cnfn convert_long4_rtn(half4);
+long4 __ovld __cnfn convert_long4_rtz(half4);
+long4 __ovld __cnfn convert_long4_sat(half4);
+long4 __ovld __cnfn convert_long4_sat_rte(half4);
+long4 __ovld __cnfn convert_long4_sat_rtp(half4);
+long4 __ovld __cnfn convert_long4_sat_rtn(half4);
+long4 __ovld __cnfn convert_long4_sat_rtz(half4);
+long8 __ovld __cnfn convert_long8(half8);
+long8 __ovld __cnfn convert_long8_rte(half8);
+long8 __ovld __cnfn convert_long8_rtp(half8);
+long8 __ovld __cnfn convert_long8_rtn(half8);
+long8 __ovld __cnfn convert_long8_rtz(half8);
+long8 __ovld __cnfn convert_long8_sat(half8);
+long8 __ovld __cnfn convert_long8_sat_rte(half8);
+long8 __ovld __cnfn convert_long8_sat_rtp(half8);
+long8 __ovld __cnfn convert_long8_sat_rtn(half8);
+long8 __ovld __cnfn convert_long8_sat_rtz(half8);
+long16 __ovld __cnfn convert_long16(half16);
+long16 __ovld __cnfn convert_long16_rte(half16);
+long16 __ovld __cnfn convert_long16_rtp(half16);
+long16 __ovld __cnfn convert_long16_rtn(half16);
+long16 __ovld __cnfn convert_long16_rtz(half16);
+long16 __ovld __cnfn convert_long16_sat(half16);
+long16 __ovld __cnfn convert_long16_sat_rte(half16);
+long16 __ovld __cnfn convert_long16_sat_rtp(half16);
+long16 __ovld __cnfn convert_long16_sat_rtn(half16);
+long16 __ovld __cnfn convert_long16_sat_rtz(half16);
+float __ovld __cnfn convert_float(half);
+float __ovld __cnfn convert_float_rte(half);
+float __ovld __cnfn convert_float_rtp(half);
+float __ovld __cnfn convert_float_rtn(half);
+float __ovld __cnfn convert_float_rtz(half);
+float2 __ovld __cnfn convert_float2(half2);
+float2 __ovld __cnfn convert_float2_rte(half2);
+float2 __ovld __cnfn convert_float2_rtp(half2);
+float2 __ovld __cnfn convert_float2_rtn(half2);
+float2 __ovld __cnfn convert_float2_rtz(half2);
+float3 __ovld __cnfn convert_float3(half3);
+float3 __ovld __cnfn convert_float3_rte(half3);
+float3 __ovld __cnfn convert_float3_rtp(half3);
+float3 __ovld __cnfn convert_float3_rtn(half3);
+float3 __ovld __cnfn convert_float3_rtz(half3);
+float4 __ovld __cnfn convert_float4(half4);
+float4 __ovld __cnfn convert_float4_rte(half4);
+float4 __ovld __cnfn convert_float4_rtp(half4);
+float4 __ovld __cnfn convert_float4_rtn(half4);
+float4 __ovld __cnfn convert_float4_rtz(half4);
+float8 __ovld __cnfn convert_float8(half8);
+float8 __ovld __cnfn convert_float8_rte(half8);
+float8 __ovld __cnfn convert_float8_rtp(half8);
+float8 __ovld __cnfn convert_float8_rtn(half8);
+float8 __ovld __cnfn convert_float8_rtz(half8);
+float16 __ovld __cnfn convert_float16(half16);
+float16 __ovld __cnfn convert_float16_rte(half16);
+float16 __ovld __cnfn convert_float16_rtp(half16);
+float16 __ovld __cnfn convert_float16_rtn(half16);
+float16 __ovld __cnfn convert_float16_rtz(half16);
+
+// Convert non-double types to half types.
+half __ovld __cnfn convert_half(uchar);
+half __ovld __cnfn convert_half(ushort);
+half __ovld __cnfn convert_half(uint);
+half __ovld __cnfn convert_half(ulong);
+half __ovld __cnfn convert_half(char);
+half __ovld __cnfn convert_half(short);
+half __ovld __cnfn convert_half(int);
+half __ovld __cnfn convert_half(long);
+half __ovld __cnfn convert_half(float);
+half __ovld __cnfn convert_half(half);
+half __ovld __cnfn convert_half_rte(uchar);
+half __ovld __cnfn convert_half_rte(ushort);
+half __ovld __cnfn convert_half_rte(uint);
+half __ovld __cnfn convert_half_rte(ulong);
+half __ovld __cnfn convert_half_rte(char);
+half __ovld __cnfn convert_half_rte(short);
+half __ovld __cnfn convert_half_rte(int);
+half __ovld __cnfn convert_half_rte(long);
+half __ovld __cnfn convert_half_rte(float);
+half __ovld __cnfn convert_half_rte(half);
+half __ovld __cnfn convert_half_rtp(uchar);
+half __ovld __cnfn convert_half_rtp(ushort);
+half __ovld __cnfn convert_half_rtp(uint);
+half __ovld __cnfn convert_half_rtp(ulong);
+half __ovld __cnfn convert_half_rtp(char);
+half __ovld __cnfn convert_half_rtp(short);
+half __ovld __cnfn convert_half_rtp(int);
+half __ovld __cnfn convert_half_rtp(long);
+half __ovld __cnfn convert_half_rtp(float);
+half __ovld __cnfn convert_half_rtp(half);
+half __ovld __cnfn convert_half_rtn(uchar);
+half __ovld __cnfn convert_half_rtn(ushort);
+half __ovld __cnfn convert_half_rtn(uint);
+half __ovld __cnfn convert_half_rtn(ulong);
+half __ovld __cnfn convert_half_rtn(char);
+half __ovld __cnfn convert_half_rtn(short);
+half __ovld __cnfn convert_half_rtn(int);
+half __ovld __cnfn convert_half_rtn(long);
+half __ovld __cnfn convert_half_rtn(float);
+half __ovld __cnfn convert_half_rtn(half);
+half __ovld __cnfn convert_half_rtz(uchar);
+half __ovld __cnfn convert_half_rtz(ushort);
+half __ovld __cnfn convert_half_rtz(uint);
+half __ovld __cnfn convert_half_rtz(ulong);
+half __ovld __cnfn convert_half_rtz(char);
+half __ovld __cnfn convert_half_rtz(short);
+half __ovld __cnfn convert_half_rtz(int);
+half __ovld __cnfn convert_half_rtz(long);
+half __ovld __cnfn convert_half_rtz(float);
+half __ovld __cnfn convert_half_rtz(half);
+half2 __ovld __cnfn convert_half2(char2);
+half2 __ovld __cnfn convert_half2(uchar2);
+half2 __ovld __cnfn convert_half2(short2);
+half2 __ovld __cnfn convert_half2(ushort2);
+half2 __ovld __cnfn convert_half2(int2);
+half2 __ovld __cnfn convert_half2(uint2);
+half2 __ovld __cnfn convert_half2(long2);
+half2 __ovld __cnfn convert_half2(ulong2);
+half2 __ovld __cnfn convert_half2(float2);
+half2 __ovld __cnfn convert_half2(half2);
+half2 __ovld __cnfn convert_half2_rte(char2);
+half2 __ovld __cnfn convert_half2_rte(uchar2);
+half2 __ovld __cnfn convert_half2_rte(short2);
+half2 __ovld __cnfn convert_half2_rte(ushort2);
+half2 __ovld __cnfn convert_half2_rte(int2);
+half2 __ovld __cnfn convert_half2_rte(uint2);
+half2 __ovld __cnfn convert_half2_rte(long2);
+half2 __ovld __cnfn convert_half2_rte(ulong2);
+half2 __ovld __cnfn convert_half2_rte(float2);
+half2 __ovld __cnfn convert_half2_rte(half2);
+half2 __ovld __cnfn convert_half2_rtp(char2);
+half2 __ovld __cnfn convert_half2_rtp(uchar2);
+half2 __ovld __cnfn convert_half2_rtp(short2);
+half2 __ovld __cnfn convert_half2_rtp(ushort2);
+half2 __ovld __cnfn convert_half2_rtp(int2);
+half2 __ovld __cnfn convert_half2_rtp(uint2);
+half2 __ovld __cnfn convert_half2_rtp(long2);
+half2 __ovld __cnfn convert_half2_rtp(ulong2);
+half2 __ovld __cnfn convert_half2_rtp(float2);
+half2 __ovld __cnfn convert_half2_rtp(half2);
+half2 __ovld __cnfn convert_half2_rtn(char2);
+half2 __ovld __cnfn convert_half2_rtn(uchar2);
+half2 __ovld __cnfn convert_half2_rtn(short2);
+half2 __ovld __cnfn convert_half2_rtn(ushort2);
+half2 __ovld __cnfn convert_half2_rtn(int2);
+half2 __ovld __cnfn convert_half2_rtn(uint2);
+half2 __ovld __cnfn convert_half2_rtn(long2);
+half2 __ovld __cnfn convert_half2_rtn(ulong2);
+half2 __ovld __cnfn convert_half2_rtn(float2);
+half2 __ovld __cnfn convert_half2_rtn(half2);
+half2 __ovld __cnfn convert_half2_rtz(char2);
+half2 __ovld __cnfn convert_half2_rtz(uchar2);
+half2 __ovld __cnfn convert_half2_rtz(short2);
+half2 __ovld __cnfn convert_half2_rtz(ushort2);
+half2 __ovld __cnfn convert_half2_rtz(int2);
+half2 __ovld __cnfn convert_half2_rtz(uint2);
+half2 __ovld __cnfn convert_half2_rtz(long2);
+half2 __ovld __cnfn convert_half2_rtz(ulong2);
+half2 __ovld __cnfn convert_half2_rtz(float2);
+half2 __ovld __cnfn convert_half2_rtz(half2);
+half3 __ovld __cnfn convert_half3(char3);
+half3 __ovld __cnfn convert_half3(uchar3);
+half3 __ovld __cnfn convert_half3(short3);
+half3 __ovld __cnfn convert_half3(ushort3);
+half3 __ovld __cnfn convert_half3(int3);
+half3 __ovld __cnfn convert_half3(uint3);
+half3 __ovld __cnfn convert_half3(long3);
+half3 __ovld __cnfn convert_half3(ulong3);
+half3 __ovld __cnfn convert_half3(float3);
+half3 __ovld __cnfn convert_half3(half3);
+half3 __ovld __cnfn convert_half3_rte(char3);
+half3 __ovld __cnfn convert_half3_rte(uchar3);
+half3 __ovld __cnfn convert_half3_rte(short3);
+half3 __ovld __cnfn convert_half3_rte(ushort3);
+half3 __ovld __cnfn convert_half3_rte(int3);
+half3 __ovld __cnfn convert_half3_rte(uint3);
+half3 __ovld __cnfn convert_half3_rte(long3);
+half3 __ovld __cnfn convert_half3_rte(ulong3);
+half3 __ovld __cnfn convert_half3_rte(float3);
+half3 __ovld __cnfn convert_half3_rte(half3);
+half3 __ovld __cnfn convert_half3_rtp(char3);
+half3 __ovld __cnfn convert_half3_rtp(uchar3);
+half3 __ovld __cnfn convert_half3_rtp(short3);
+half3 __ovld __cnfn convert_half3_rtp(ushort3);
+half3 __ovld __cnfn convert_half3_rtp(int3);
+half3 __ovld __cnfn convert_half3_rtp(uint3);
+half3 __ovld __cnfn convert_half3_rtp(long3);
+half3 __ovld __cnfn convert_half3_rtp(ulong3);
+half3 __ovld __cnfn convert_half3_rtp(float3);
+half3 __ovld __cnfn convert_half3_rtp(half3);
+half3 __ovld __cnfn convert_half3_rtn(char3);
+half3 __ovld __cnfn convert_half3_rtn(uchar3);
+half3 __ovld __cnfn convert_half3_rtn(short3);
+half3 __ovld __cnfn convert_half3_rtn(ushort3);
+half3 __ovld __cnfn convert_half3_rtn(int3);
+half3 __ovld __cnfn convert_half3_rtn(uint3);
+half3 __ovld __cnfn convert_half3_rtn(long3);
+half3 __ovld __cnfn convert_half3_rtn(ulong3);
+half3 __ovld __cnfn convert_half3_rtn(float3);
+half3 __ovld __cnfn convert_half3_rtn(half3);
+half3 __ovld __cnfn convert_half3_rtz(char3);
+half3 __ovld __cnfn convert_half3_rtz(uchar3);
+half3 __ovld __cnfn convert_half3_rtz(short3);
+half3 __ovld __cnfn convert_half3_rtz(ushort3);
+half3 __ovld __cnfn convert_half3_rtz(int3);
+half3 __ovld __cnfn convert_half3_rtz(uint3);
+half3 __ovld __cnfn convert_half3_rtz(long3);
+half3 __ovld __cnfn convert_half3_rtz(ulong3);
+half3 __ovld __cnfn convert_half3_rtz(float3);
+half3 __ovld __cnfn convert_half3_rtz(half3);
+half4 __ovld __cnfn convert_half4(char4);
+half4 __ovld __cnfn convert_half4(uchar4);
+half4 __ovld __cnfn convert_half4(short4);
+half4 __ovld __cnfn convert_half4(ushort4);
+half4 __ovld __cnfn convert_half4(int4);
+half4 __ovld __cnfn convert_half4(uint4);
+half4 __ovld __cnfn convert_half4(long4);
+half4 __ovld __cnfn convert_half4(ulong4);
+half4 __ovld __cnfn convert_half4(float4);
+half4 __ovld __cnfn convert_half4(half4);
+half4 __ovld __cnfn convert_half4_rte(char4);
+half4 __ovld __cnfn convert_half4_rte(uchar4);
+half4 __ovld __cnfn convert_half4_rte(short4);
+half4 __ovld __cnfn convert_half4_rte(ushort4);
+half4 __ovld __cnfn convert_half4_rte(int4);
+half4 __ovld __cnfn convert_half4_rte(uint4);
+half4 __ovld __cnfn convert_half4_rte(long4);
+half4 __ovld __cnfn convert_half4_rte(ulong4);
+half4 __ovld __cnfn convert_half4_rte(float4);
+half4 __ovld __cnfn convert_half4_rte(half4);
+half4 __ovld __cnfn convert_half4_rtp(char4);
+half4 __ovld __cnfn convert_half4_rtp(uchar4);
+half4 __ovld __cnfn convert_half4_rtp(short4);
+half4 __ovld __cnfn convert_half4_rtp(ushort4);
+half4 __ovld __cnfn convert_half4_rtp(int4);
+half4 __ovld __cnfn convert_half4_rtp(uint4);
+half4 __ovld __cnfn convert_half4_rtp(long4);
+half4 __ovld __cnfn convert_half4_rtp(ulong4);
+half4 __ovld __cnfn convert_half4_rtp(float4);
+half4 __ovld __cnfn convert_half4_rtp(half4);
+half4 __ovld __cnfn convert_half4_rtn(char4);
+half4 __ovld __cnfn convert_half4_rtn(uchar4);
+half4 __ovld __cnfn convert_half4_rtn(short4);
+half4 __ovld __cnfn convert_half4_rtn(ushort4);
+half4 __ovld __cnfn convert_half4_rtn(int4);
+half4 __ovld __cnfn convert_half4_rtn(uint4);
+half4 __ovld __cnfn convert_half4_rtn(long4);
+half4 __ovld __cnfn convert_half4_rtn(ulong4);
+half4 __ovld __cnfn convert_half4_rtn(float4);
+half4 __ovld __cnfn convert_half4_rtn(half4);
+half4 __ovld __cnfn convert_half4_rtz(char4);
+half4 __ovld __cnfn convert_half4_rtz(uchar4);
+half4 __ovld __cnfn convert_half4_rtz(short4);
+half4 __ovld __cnfn convert_half4_rtz(ushort4);
+half4 __ovld __cnfn convert_half4_rtz(int4);
+half4 __ovld __cnfn convert_half4_rtz(uint4);
+half4 __ovld __cnfn convert_half4_rtz(long4);
+half4 __ovld __cnfn convert_half4_rtz(ulong4);
+half4 __ovld __cnfn convert_half4_rtz(float4);
+half4 __ovld __cnfn convert_half4_rtz(half4);
+half8 __ovld __cnfn convert_half8(char8);
+half8 __ovld __cnfn convert_half8(uchar8);
+half8 __ovld __cnfn convert_half8(short8);
+half8 __ovld __cnfn convert_half8(ushort8);
+half8 __ovld __cnfn convert_half8(int8);
+half8 __ovld __cnfn convert_half8(uint8);
+half8 __ovld __cnfn convert_half8(long8);
+half8 __ovld __cnfn convert_half8(ulong8);
+half8 __ovld __cnfn convert_half8(float8);
+half8 __ovld __cnfn convert_half8(half8);
+half8 __ovld __cnfn convert_half8_rte(char8);
+half8 __ovld __cnfn convert_half8_rte(uchar8);
+half8 __ovld __cnfn convert_half8_rte(short8);
+half8 __ovld __cnfn convert_half8_rte(ushort8);
+half8 __ovld __cnfn convert_half8_rte(int8);
+half8 __ovld __cnfn convert_half8_rte(uint8);
+half8 __ovld __cnfn convert_half8_rte(long8);
+half8 __ovld __cnfn convert_half8_rte(ulong8);
+half8 __ovld __cnfn convert_half8_rte(float8);
+half8 __ovld __cnfn convert_half8_rte(half8);
+half8 __ovld __cnfn convert_half8_rtp(char8);
+half8 __ovld __cnfn convert_half8_rtp(uchar8);
+half8 __ovld __cnfn convert_half8_rtp(short8);
+half8 __ovld __cnfn convert_half8_rtp(ushort8);
+half8 __ovld __cnfn convert_half8_rtp(int8);
+half8 __ovld __cnfn convert_half8_rtp(uint8);
+half8 __ovld __cnfn convert_half8_rtp(long8);
+half8 __ovld __cnfn convert_half8_rtp(ulong8);
+half8 __ovld __cnfn convert_half8_rtp(float8);
+half8 __ovld __cnfn convert_half8_rtp(half8);
+half8 __ovld __cnfn convert_half8_rtn(char8);
+half8 __ovld __cnfn convert_half8_rtn(uchar8);
+half8 __ovld __cnfn convert_half8_rtn(short8);
+half8 __ovld __cnfn convert_half8_rtn(ushort8);
+half8 __ovld __cnfn convert_half8_rtn(int8);
+half8 __ovld __cnfn convert_half8_rtn(uint8);
+half8 __ovld __cnfn convert_half8_rtn(long8);
+half8 __ovld __cnfn convert_half8_rtn(ulong8);
+half8 __ovld __cnfn convert_half8_rtn(float8);
+half8 __ovld __cnfn convert_half8_rtn(half8);
+half8 __ovld __cnfn convert_half8_rtz(char8);
+half8 __ovld __cnfn convert_half8_rtz(uchar8);
+half8 __ovld __cnfn convert_half8_rtz(short8);
+half8 __ovld __cnfn convert_half8_rtz(ushort8);
+half8 __ovld __cnfn convert_half8_rtz(int8);
+half8 __ovld __cnfn convert_half8_rtz(uint8);
+half8 __ovld __cnfn convert_half8_rtz(long8);
+half8 __ovld __cnfn convert_half8_rtz(ulong8);
+half8 __ovld __cnfn convert_half8_rtz(float8);
+half8 __ovld __cnfn convert_half8_rtz(half8);
+half16 __ovld __cnfn convert_half16(char16);
+half16 __ovld __cnfn convert_half16(uchar16);
+half16 __ovld __cnfn convert_half16(short16);
+half16 __ovld __cnfn convert_half16(ushort16);
+half16 __ovld __cnfn convert_half16(int16);
+half16 __ovld __cnfn convert_half16(uint16);
+half16 __ovld __cnfn convert_half16(long16);
+half16 __ovld __cnfn convert_half16(ulong16);
+half16 __ovld __cnfn convert_half16(float16);
+half16 __ovld __cnfn convert_half16(half16);
+half16 __ovld __cnfn convert_half16_rte(char16);
+half16 __ovld __cnfn convert_half16_rte(uchar16);
+half16 __ovld __cnfn convert_half16_rte(short16);
+half16 __ovld __cnfn convert_half16_rte(ushort16);
+half16 __ovld __cnfn convert_half16_rte(int16);
+half16 __ovld __cnfn convert_half16_rte(uint16);
+half16 __ovld __cnfn convert_half16_rte(long16);
+half16 __ovld __cnfn convert_half16_rte(ulong16);
+half16 __ovld __cnfn convert_half16_rte(float16);
+half16 __ovld __cnfn convert_half16_rte(half16);
+half16 __ovld __cnfn convert_half16_rtp(char16);
+half16 __ovld __cnfn convert_half16_rtp(uchar16);
+half16 __ovld __cnfn convert_half16_rtp(short16);
+half16 __ovld __cnfn convert_half16_rtp(ushort16);
+half16 __ovld __cnfn convert_half16_rtp(int16);
+half16 __ovld __cnfn convert_half16_rtp(uint16);
+half16 __ovld __cnfn convert_half16_rtp(long16);
+half16 __ovld __cnfn convert_half16_rtp(ulong16);
+half16 __ovld __cnfn convert_half16_rtp(float16);
+half16 __ovld __cnfn convert_half16_rtp(half16);
+half16 __ovld __cnfn convert_half16_rtn(char16);
+half16 __ovld __cnfn convert_half16_rtn(uchar16);
+half16 __ovld __cnfn convert_half16_rtn(short16);
+half16 __ovld __cnfn convert_half16_rtn(ushort16);
+half16 __ovld __cnfn convert_half16_rtn(int16);
+half16 __ovld __cnfn convert_half16_rtn(uint16);
+half16 __ovld __cnfn convert_half16_rtn(long16);
+half16 __ovld __cnfn convert_half16_rtn(ulong16);
+half16 __ovld __cnfn convert_half16_rtn(float16);
+half16 __ovld __cnfn convert_half16_rtn(half16);
+half16 __ovld __cnfn convert_half16_rtz(char16);
+half16 __ovld __cnfn convert_half16_rtz(uchar16);
+half16 __ovld __cnfn convert_half16_rtz(short16);
+half16 __ovld __cnfn convert_half16_rtz(ushort16);
+half16 __ovld __cnfn convert_half16_rtz(int16);
+half16 __ovld __cnfn convert_half16_rtz(uint16);
+half16 __ovld __cnfn convert_half16_rtz(long16);
+half16 __ovld __cnfn convert_half16_rtz(ulong16);
+half16 __ovld __cnfn convert_half16_rtz(float16);
+half16 __ovld __cnfn convert_half16_rtz(half16);
+
+// Convert half types to double types.
+#ifdef cl_khr_fp64
+double __ovld __cnfn convert_double(half);
+double __ovld __cnfn convert_double_rte(half);
+double __ovld __cnfn convert_double_rtp(half);
+double __ovld __cnfn convert_double_rtn(half);
+double __ovld __cnfn convert_double_rtz(half);
+double2 __ovld __cnfn convert_double2(half2);
+double2 __ovld __cnfn convert_double2_rte(half2);
+double2 __ovld __cnfn convert_double2_rtp(half2);
+double2 __ovld __cnfn convert_double2_rtn(half2);
+double2 __ovld __cnfn convert_double2_rtz(half2);
+double3 __ovld __cnfn convert_double3(half3);
+double3 __ovld __cnfn convert_double3_rte(half3);
+double3 __ovld __cnfn convert_double3_rtp(half3);
+double3 __ovld __cnfn convert_double3_rtn(half3);
+double3 __ovld __cnfn convert_double3_rtz(half3);
+double4 __ovld __cnfn convert_double4(half4);
+double4 __ovld __cnfn convert_double4_rte(half4);
+double4 __ovld __cnfn convert_double4_rtp(half4);
+double4 __ovld __cnfn convert_double4_rtn(half4);
+double4 __ovld __cnfn convert_double4_rtz(half4);
+double8 __ovld __cnfn convert_double8(half8);
+double8 __ovld __cnfn convert_double8_rte(half8);
+double8 __ovld __cnfn convert_double8_rtp(half8);
+double8 __ovld __cnfn convert_double8_rtn(half8);
+double8 __ovld __cnfn convert_double8_rtz(half8);
+double16 __ovld __cnfn convert_double16(half16);
+double16 __ovld __cnfn convert_double16_rte(half16);
+double16 __ovld __cnfn convert_double16_rtp(half16);
+double16 __ovld __cnfn convert_double16_rtn(half16);
+double16 __ovld __cnfn convert_double16_rtz(half16);
+
+// Convert double types to half types.
+half __ovld __cnfn convert_half(double);
+half __ovld __cnfn convert_half_rte(double);
+half __ovld __cnfn convert_half_rtp(double);
+half __ovld __cnfn convert_half_rtn(double);
+half __ovld __cnfn convert_half_rtz(double);
+half2 __ovld __cnfn convert_half2(double2);
+half2 __ovld __cnfn convert_half2_rte(double2);
+half2 __ovld __cnfn convert_half2_rtp(double2);
+half2 __ovld __cnfn convert_half2_rtn(double2);
+half2 __ovld __cnfn convert_half2_rtz(double2);
+half3 __ovld __cnfn convert_half3(double3);
+half3 __ovld __cnfn convert_half3_rte(double3);
+half3 __ovld __cnfn convert_half3_rtp(double3);
+half3 __ovld __cnfn convert_half3_rtn(double3);
+half3 __ovld __cnfn convert_half3_rtz(double3);
+half4 __ovld __cnfn convert_half4(double4);
+half4 __ovld __cnfn convert_half4_rte(double4);
+half4 __ovld __cnfn convert_half4_rtp(double4);
+half4 __ovld __cnfn convert_half4_rtn(double4);
+half4 __ovld __cnfn convert_half4_rtz(double4);
+half8 __ovld __cnfn convert_half8(double8);
+half8 __ovld __cnfn convert_half8_rte(double8);
+half8 __ovld __cnfn convert_half8_rtp(double8);
+half8 __ovld __cnfn convert_half8_rtn(double8);
+half8 __ovld __cnfn convert_half8_rtz(double8);
+half16 __ovld __cnfn convert_half16(double16);
+half16 __ovld __cnfn convert_half16_rte(double16);
+half16 __ovld __cnfn convert_half16_rtp(double16);
+half16 __ovld __cnfn convert_half16_rtn(double16);
+half16 __ovld __cnfn convert_half16_rtz(double16);
+#endif //cl_khr_fp64
+
+#endif //cl_khr_fp16
+
+/**
+ * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators
+ * Reinterprets a data type as another data type of the same size
+ */
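+// Illustrative examples (editor's sketch, not upstream text): as_int(1.0f)
+// reinterprets the IEEE-754 bit pattern of 1.0f and yields 0x3F800000, and
+// as_float4(i) reinterprets a hypothetical int4 value i as a float4
+// bit-for-bit; unlike the convert_ builtins above, no value conversion
+// takes place.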
+char __ovld __cnfn as_char(char);
+char __ovld __cnfn as_char(uchar);
+
+char2 __ovld __cnfn as_char2(char2);
+char2 __ovld __cnfn as_char2(uchar2);
+char2 __ovld __cnfn as_char2(short);
+char2 __ovld __cnfn as_char2(ushort);
+
+char3 __ovld __cnfn as_char3(char3);
+char3 __ovld __cnfn as_char3(char4);
+char3 __ovld __cnfn as_char3(uchar3);
+char3 __ovld __cnfn as_char3(uchar4);
+char3 __ovld __cnfn as_char3(short2);
+char3 __ovld __cnfn as_char3(ushort2);
+char3 __ovld __cnfn as_char3(int);
+char3 __ovld __cnfn as_char3(uint);
+char3 __ovld __cnfn as_char3(float);
+
+char4 __ovld __cnfn as_char4(char3);
+char4 __ovld __cnfn as_char4(char4);
+char4 __ovld __cnfn as_char4(uchar3);
+char4 __ovld __cnfn as_char4(uchar4);
+char4 __ovld __cnfn as_char4(short2);
+char4 __ovld __cnfn as_char4(ushort2);
+char4 __ovld __cnfn as_char4(int);
+char4 __ovld __cnfn as_char4(uint);
+char4 __ovld __cnfn as_char4(float);
+
+char8 __ovld __cnfn as_char8(char8);
+char8 __ovld __cnfn as_char8(uchar8);
+char8 __ovld __cnfn as_char8(short3);
+char8 __ovld __cnfn as_char8(short4);
+char8 __ovld __cnfn as_char8(ushort3);
+char8 __ovld __cnfn as_char8(ushort4);
+char8 __ovld __cnfn as_char8(int2);
+char8 __ovld __cnfn as_char8(uint2);
+char8 __ovld __cnfn as_char8(long);
+char8 __ovld __cnfn as_char8(ulong);
+char8 __ovld __cnfn as_char8(float2);
+
+char16 __ovld __cnfn as_char16(char16);
+char16 __ovld __cnfn as_char16(uchar16);
+char16 __ovld __cnfn as_char16(short8);
+char16 __ovld __cnfn as_char16(ushort8);
+char16 __ovld __cnfn as_char16(int3);
+char16 __ovld __cnfn as_char16(int4);
+char16 __ovld __cnfn as_char16(uint3);
+char16 __ovld __cnfn as_char16(uint4);
+char16 __ovld __cnfn as_char16(long2);
+char16 __ovld __cnfn as_char16(ulong2);
+char16 __ovld __cnfn as_char16(float3);
+char16 __ovld __cnfn as_char16(float4);
+
+uchar __ovld __cnfn as_uchar(char);
+uchar __ovld __cnfn as_uchar(uchar);
+
+uchar2 __ovld __cnfn as_uchar2(char2);
+uchar2 __ovld __cnfn as_uchar2(uchar2);
+uchar2 __ovld __cnfn as_uchar2(short);
+uchar2 __ovld __cnfn as_uchar2(ushort);
+
+uchar3 __ovld __cnfn as_uchar3(char3);
+uchar3 __ovld __cnfn as_uchar3(char4);
+uchar3 __ovld __cnfn as_uchar3(uchar3);
+uchar3 __ovld __cnfn as_uchar3(uchar4);
+uchar3 __ovld __cnfn as_uchar3(short2);
+uchar3 __ovld __cnfn as_uchar3(ushort2);
+uchar3 __ovld __cnfn as_uchar3(int);
+uchar3 __ovld __cnfn as_uchar3(uint);
+uchar3 __ovld __cnfn as_uchar3(float);
+
+uchar4 __ovld __cnfn as_uchar4(char3);
+uchar4 __ovld __cnfn as_uchar4(char4);
+uchar4 __ovld __cnfn as_uchar4(uchar3);
+uchar4 __ovld __cnfn as_uchar4(uchar4);
+uchar4 __ovld __cnfn as_uchar4(short2);
+uchar4 __ovld __cnfn as_uchar4(ushort2);
+uchar4 __ovld __cnfn as_uchar4(int);
+uchar4 __ovld __cnfn as_uchar4(uint);
+uchar4 __ovld __cnfn as_uchar4(float);
+
+uchar8 __ovld __cnfn as_uchar8(char8);
+uchar8 __ovld __cnfn as_uchar8(uchar8);
+uchar8 __ovld __cnfn as_uchar8(short3);
+uchar8 __ovld __cnfn as_uchar8(short4);
+uchar8 __ovld __cnfn as_uchar8(ushort3);
+uchar8 __ovld __cnfn as_uchar8(ushort4);
+uchar8 __ovld __cnfn as_uchar8(int2);
+uchar8 __ovld __cnfn as_uchar8(uint2);
+uchar8 __ovld __cnfn as_uchar8(long);
+uchar8 __ovld __cnfn as_uchar8(ulong);
+uchar8 __ovld __cnfn as_uchar8(float2);
+
+uchar16 __ovld __cnfn as_uchar16(char16);
+uchar16 __ovld __cnfn as_uchar16(uchar16);
+uchar16 __ovld __cnfn as_uchar16(short8);
+uchar16 __ovld __cnfn as_uchar16(ushort8);
+uchar16 __ovld __cnfn as_uchar16(int3);
+uchar16 __ovld __cnfn as_uchar16(int4);
+uchar16 __ovld __cnfn as_uchar16(uint3);
+uchar16 __ovld __cnfn as_uchar16(uint4);
+uchar16 __ovld __cnfn as_uchar16(long2);
+uchar16 __ovld __cnfn as_uchar16(ulong2);
+uchar16 __ovld __cnfn as_uchar16(float3);
+uchar16 __ovld __cnfn as_uchar16(float4);
+
+short __ovld __cnfn as_short(char2);
+short __ovld __cnfn as_short(uchar2);
+short __ovld __cnfn as_short(short);
+short __ovld __cnfn as_short(ushort);
+
+short2 __ovld __cnfn as_short2(char3);
+short2 __ovld __cnfn as_short2(char4);
+short2 __ovld __cnfn as_short2(uchar3);
+short2 __ovld __cnfn as_short2(uchar4);
+short2 __ovld __cnfn as_short2(short2);
+short2 __ovld __cnfn as_short2(ushort2);
+short2 __ovld __cnfn as_short2(int);
+short2 __ovld __cnfn as_short2(uint);
+short2 __ovld __cnfn as_short2(float);
+
+short3 __ovld __cnfn as_short3(char8);
+short3 __ovld __cnfn as_short3(uchar8);
+short3 __ovld __cnfn as_short3(short3);
+short3 __ovld __cnfn as_short3(short4);
+short3 __ovld __cnfn as_short3(ushort3);
+short3 __ovld __cnfn as_short3(ushort4);
+short3 __ovld __cnfn as_short3(int2);
+short3 __ovld __cnfn as_short3(uint2);
+short3 __ovld __cnfn as_short3(long);
+short3 __ovld __cnfn as_short3(ulong);
+short3 __ovld __cnfn as_short3(float2);
+
+short4 __ovld __cnfn as_short4(char8);
+short4 __ovld __cnfn as_short4(uchar8);
+short4 __ovld __cnfn as_short4(short3);
+short4 __ovld __cnfn as_short4(short4);
+short4 __ovld __cnfn as_short4(ushort3);
+short4 __ovld __cnfn as_short4(ushort4);
+short4 __ovld __cnfn as_short4(int2);
+short4 __ovld __cnfn as_short4(uint2);
+short4 __ovld __cnfn as_short4(long);
+short4 __ovld __cnfn as_short4(ulong);
+short4 __ovld __cnfn as_short4(float2);
+
+short8 __ovld __cnfn as_short8(char16);
+short8 __ovld __cnfn as_short8(uchar16);
+short8 __ovld __cnfn as_short8(short8);
+short8 __ovld __cnfn as_short8(ushort8);
+short8 __ovld __cnfn as_short8(int3);
+short8 __ovld __cnfn as_short8(int4);
+short8 __ovld __cnfn as_short8(uint3);
+short8 __ovld __cnfn as_short8(uint4);
+short8 __ovld __cnfn as_short8(long2);
+short8 __ovld __cnfn as_short8(ulong2);
+short8 __ovld __cnfn as_short8(float3);
+short8 __ovld __cnfn as_short8(float4);
+
+short16 __ovld __cnfn as_short16(short16);
+short16 __ovld __cnfn as_short16(ushort16);
+short16 __ovld __cnfn as_short16(int8);
+short16 __ovld __cnfn as_short16(uint8);
+short16 __ovld __cnfn as_short16(long3);
+short16 __ovld __cnfn as_short16(long4);
+short16 __ovld __cnfn as_short16(ulong3);
+short16 __ovld __cnfn as_short16(ulong4);
+short16 __ovld __cnfn as_short16(float8);
+
+ushort __ovld __cnfn as_ushort(char2);
+ushort __ovld __cnfn as_ushort(uchar2);
+ushort __ovld __cnfn as_ushort(short);
+ushort __ovld __cnfn as_ushort(ushort);
+
+ushort2 __ovld __cnfn as_ushort2(char3);
+ushort2 __ovld __cnfn as_ushort2(char4);
+ushort2 __ovld __cnfn as_ushort2(uchar3);
+ushort2 __ovld __cnfn as_ushort2(uchar4);
+ushort2 __ovld __cnfn as_ushort2(short2);
+ushort2 __ovld __cnfn as_ushort2(ushort2);
+ushort2 __ovld __cnfn as_ushort2(int);
+ushort2 __ovld __cnfn as_ushort2(uint);
+ushort2 __ovld __cnfn as_ushort2(float);
+
+ushort3 __ovld __cnfn as_ushort3(char8);
+ushort3 __ovld __cnfn as_ushort3(uchar8);
+ushort3 __ovld __cnfn as_ushort3(short3);
+ushort3 __ovld __cnfn as_ushort3(short4);
+ushort3 __ovld __cnfn as_ushort3(ushort3);
+ushort3 __ovld __cnfn as_ushort3(ushort4);
+ushort3 __ovld __cnfn as_ushort3(int2);
+ushort3 __ovld __cnfn as_ushort3(uint2);
+ushort3 __ovld __cnfn as_ushort3(long);
+ushort3 __ovld __cnfn as_ushort3(ulong);
+ushort3 __ovld __cnfn as_ushort3(float2);
+
+ushort4 __ovld __cnfn as_ushort4(char8);
+ushort4 __ovld __cnfn as_ushort4(uchar8);
+ushort4 __ovld __cnfn as_ushort4(short3);
+ushort4 __ovld __cnfn as_ushort4(short4);
+ushort4 __ovld __cnfn as_ushort4(ushort3);
+ushort4 __ovld __cnfn as_ushort4(ushort4);
+ushort4 __ovld __cnfn as_ushort4(int2);
+ushort4 __ovld __cnfn as_ushort4(uint2);
+ushort4 __ovld __cnfn as_ushort4(long);
+ushort4 __ovld __cnfn as_ushort4(ulong);
+ushort4 __ovld __cnfn as_ushort4(float2);
+
+ushort8 __ovld __cnfn as_ushort8(char16);
+ushort8 __ovld __cnfn as_ushort8(uchar16);
+ushort8 __ovld __cnfn as_ushort8(short8);
+ushort8 __ovld __cnfn as_ushort8(ushort8);
+ushort8 __ovld __cnfn as_ushort8(int3);
+ushort8 __ovld __cnfn as_ushort8(int4);
+ushort8 __ovld __cnfn as_ushort8(uint3);
+ushort8 __ovld __cnfn as_ushort8(uint4);
+ushort8 __ovld __cnfn as_ushort8(long2);
+ushort8 __ovld __cnfn as_ushort8(ulong2);
+ushort8 __ovld __cnfn as_ushort8(float3);
+ushort8 __ovld __cnfn as_ushort8(float4);
+
+ushort16 __ovld __cnfn as_ushort16(short16);
+ushort16 __ovld __cnfn as_ushort16(ushort16);
+ushort16 __ovld __cnfn as_ushort16(int8);
+ushort16 __ovld __cnfn as_ushort16(uint8);
+ushort16 __ovld __cnfn as_ushort16(long3);
+ushort16 __ovld __cnfn as_ushort16(long4);
+ushort16 __ovld __cnfn as_ushort16(ulong3);
+ushort16 __ovld __cnfn as_ushort16(ulong4);
+ushort16 __ovld __cnfn as_ushort16(float8);
+
+int __ovld __cnfn as_int(char3);
+int __ovld __cnfn as_int(char4);
+int __ovld __cnfn as_int(uchar3);
+int __ovld __cnfn as_int(uchar4);
+int __ovld __cnfn as_int(short2);
+int __ovld __cnfn as_int(ushort2);
+int __ovld __cnfn as_int(int);
+int __ovld __cnfn as_int(uint);
+int __ovld __cnfn as_int(float);
+
+int2 __ovld __cnfn as_int2(char8);
+int2 __ovld __cnfn as_int2(uchar8);
+int2 __ovld __cnfn as_int2(short3);
+int2 __ovld __cnfn as_int2(short4);
+int2 __ovld __cnfn as_int2(ushort3);
+int2 __ovld __cnfn as_int2(ushort4);
+int2 __ovld __cnfn as_int2(int2);
+int2 __ovld __cnfn as_int2(uint2);
+int2 __ovld __cnfn as_int2(long);
+int2 __ovld __cnfn as_int2(ulong);
+int2 __ovld __cnfn as_int2(float2);
+
+int3 __ovld __cnfn as_int3(char16);
+int3 __ovld __cnfn as_int3(uchar16);
+int3 __ovld __cnfn as_int3(short8);
+int3 __ovld __cnfn as_int3(ushort8);
+int3 __ovld __cnfn as_int3(int3);
+int3 __ovld __cnfn as_int3(int4);
+int3 __ovld __cnfn as_int3(uint3);
+int3 __ovld __cnfn as_int3(uint4);
+int3 __ovld __cnfn as_int3(long2);
+int3 __ovld __cnfn as_int3(ulong2);
+int3 __ovld __cnfn as_int3(float3);
+int3 __ovld __cnfn as_int3(float4);
+
+int4 __ovld __cnfn as_int4(char16);
+int4 __ovld __cnfn as_int4(uchar16);
+int4 __ovld __cnfn as_int4(short8);
+int4 __ovld __cnfn as_int4(ushort8);
+int4 __ovld __cnfn as_int4(int3);
+int4 __ovld __cnfn as_int4(int4);
+int4 __ovld __cnfn as_int4(uint3);
+int4 __ovld __cnfn as_int4(uint4);
+int4 __ovld __cnfn as_int4(long2);
+int4 __ovld __cnfn as_int4(ulong2);
+int4 __ovld __cnfn as_int4(float3);
+int4 __ovld __cnfn as_int4(float4);
+
+int8 __ovld __cnfn as_int8(short16);
+int8 __ovld __cnfn as_int8(ushort16);
+int8 __ovld __cnfn as_int8(int8);
+int8 __ovld __cnfn as_int8(uint8);
+int8 __ovld __cnfn as_int8(long3);
+int8 __ovld __cnfn as_int8(long4);
+int8 __ovld __cnfn as_int8(ulong3);
+int8 __ovld __cnfn as_int8(ulong4);
+int8 __ovld __cnfn as_int8(float8);
+
+int16 __ovld __cnfn as_int16(int16);
+int16 __ovld __cnfn as_int16(uint16);
+int16 __ovld __cnfn as_int16(long8);
+int16 __ovld __cnfn as_int16(ulong8);
+int16 __ovld __cnfn as_int16(float16);
+
+uint __ovld __cnfn as_uint(char3);
+uint __ovld __cnfn as_uint(char4);
+uint __ovld __cnfn as_uint(uchar3);
+uint __ovld __cnfn as_uint(uchar4);
+uint __ovld __cnfn as_uint(short2);
+uint __ovld __cnfn as_uint(ushort2);
+uint __ovld __cnfn as_uint(int);
+uint __ovld __cnfn as_uint(uint);
+uint __ovld __cnfn as_uint(float);
+
+uint2 __ovld __cnfn as_uint2(char8);
+uint2 __ovld __cnfn as_uint2(uchar8);
+uint2 __ovld __cnfn as_uint2(short3);
+uint2 __ovld __cnfn as_uint2(short4);
+uint2 __ovld __cnfn as_uint2(ushort3);
+uint2 __ovld __cnfn as_uint2(ushort4);
+uint2 __ovld __cnfn as_uint2(int2);
+uint2 __ovld __cnfn as_uint2(uint2);
+uint2 __ovld __cnfn as_uint2(long);
+uint2 __ovld __cnfn as_uint2(ulong);
+uint2 __ovld __cnfn as_uint2(float2);
+
+uint3 __ovld __cnfn as_uint3(char16);
+uint3 __ovld __cnfn as_uint3(uchar16);
+uint3 __ovld __cnfn as_uint3(short8);
+uint3 __ovld __cnfn as_uint3(ushort8);
+uint3 __ovld __cnfn as_uint3(int3);
+uint3 __ovld __cnfn as_uint3(int4);
+uint3 __ovld __cnfn as_uint3(uint3);
+uint3 __ovld __cnfn as_uint3(uint4);
+uint3 __ovld __cnfn as_uint3(long2);
+uint3 __ovld __cnfn as_uint3(ulong2);
+uint3 __ovld __cnfn as_uint3(float3);
+uint3 __ovld __cnfn as_uint3(float4);
+
+uint4 __ovld __cnfn as_uint4(char16);
+uint4 __ovld __cnfn as_uint4(uchar16);
+uint4 __ovld __cnfn as_uint4(short8);
+uint4 __ovld __cnfn as_uint4(ushort8);
+uint4 __ovld __cnfn as_uint4(int3);
+uint4 __ovld __cnfn as_uint4(int4);
+uint4 __ovld __cnfn as_uint4(uint3);
+uint4 __ovld __cnfn as_uint4(uint4);
+uint4 __ovld __cnfn as_uint4(long2);
+uint4 __ovld __cnfn as_uint4(ulong2);
+uint4 __ovld __cnfn as_uint4(float3);
+uint4 __ovld __cnfn as_uint4(float4);
+
+uint8 __ovld __cnfn as_uint8(short16);
+uint8 __ovld __cnfn as_uint8(ushort16);
+uint8 __ovld __cnfn as_uint8(int8);
+uint8 __ovld __cnfn as_uint8(uint8);
+uint8 __ovld __cnfn as_uint8(long3);
+uint8 __ovld __cnfn as_uint8(long4);
+uint8 __ovld __cnfn as_uint8(ulong3);
+uint8 __ovld __cnfn as_uint8(ulong4);
+uint8 __ovld __cnfn as_uint8(float8);
+
+uint16 __ovld __cnfn as_uint16(int16);
+uint16 __ovld __cnfn as_uint16(uint16);
+uint16 __ovld __cnfn as_uint16(long8);
+uint16 __ovld __cnfn as_uint16(ulong8);
+uint16 __ovld __cnfn as_uint16(float16);
+
+long __ovld __cnfn as_long(char8);
+long __ovld __cnfn as_long(uchar8);
+long __ovld __cnfn as_long(short3);
+long __ovld __cnfn as_long(short4);
+long __ovld __cnfn as_long(ushort3);
+long __ovld __cnfn as_long(ushort4);
+long __ovld __cnfn as_long(int2);
+long __ovld __cnfn as_long(uint2);
+long __ovld __cnfn as_long(long);
+long __ovld __cnfn as_long(ulong);
+long __ovld __cnfn as_long(float2);
+
+long2 __ovld __cnfn as_long2(char16);
+long2 __ovld __cnfn as_long2(uchar16);
+long2 __ovld __cnfn as_long2(short8);
+long2 __ovld __cnfn as_long2(ushort8);
+long2 __ovld __cnfn as_long2(int3);
+long2 __ovld __cnfn as_long2(int4);
+long2 __ovld __cnfn as_long2(uint3);
+long2 __ovld __cnfn as_long2(uint4);
+long2 __ovld __cnfn as_long2(long2);
+long2 __ovld __cnfn as_long2(ulong2);
+long2 __ovld __cnfn as_long2(float3);
+long2 __ovld __cnfn as_long2(float4);
+
+long3 __ovld __cnfn as_long3(short16);
+long3 __ovld __cnfn as_long3(ushort16);
+long3 __ovld __cnfn as_long3(int8);
+long3 __ovld __cnfn as_long3(uint8);
+long3 __ovld __cnfn as_long3(long3);
+long3 __ovld __cnfn as_long3(long4);
+long3 __ovld __cnfn as_long3(ulong3);
+long3 __ovld __cnfn as_long3(ulong4);
+long3 __ovld __cnfn as_long3(float8);
+
+long4 __ovld __cnfn as_long4(short16);
+long4 __ovld __cnfn as_long4(ushort16);
+long4 __ovld __cnfn as_long4(int8);
+long4 __ovld __cnfn as_long4(uint8);
+long4 __ovld __cnfn as_long4(long3);
+long4 __ovld __cnfn as_long4(long4);
+long4 __ovld __cnfn as_long4(ulong3);
+long4 __ovld __cnfn as_long4(ulong4);
+long4 __ovld __cnfn as_long4(float8);
+
+long8 __ovld __cnfn as_long8(int16);
+long8 __ovld __cnfn as_long8(uint16);
+long8 __ovld __cnfn as_long8(long8);
+long8 __ovld __cnfn as_long8(ulong8);
+long8 __ovld __cnfn as_long8(float16);
+
+long16 __ovld __cnfn as_long16(long16);
+long16 __ovld __cnfn as_long16(ulong16);
+
+ulong __ovld __cnfn as_ulong(char8);
+ulong __ovld __cnfn as_ulong(uchar8);
+ulong __ovld __cnfn as_ulong(short3);
+ulong __ovld __cnfn as_ulong(short4);
+ulong __ovld __cnfn as_ulong(ushort3);
+ulong __ovld __cnfn as_ulong(ushort4);
+ulong __ovld __cnfn as_ulong(int2);
+ulong __ovld __cnfn as_ulong(uint2);
+ulong __ovld __cnfn as_ulong(long);
+ulong __ovld __cnfn as_ulong(ulong);
+ulong __ovld __cnfn as_ulong(float2);
+
+ulong2 __ovld __cnfn as_ulong2(char16);
+ulong2 __ovld __cnfn as_ulong2(uchar16);
+ulong2 __ovld __cnfn as_ulong2(short8);
+ulong2 __ovld __cnfn as_ulong2(ushort8);
+ulong2 __ovld __cnfn as_ulong2(int3);
+ulong2 __ovld __cnfn as_ulong2(int4);
+ulong2 __ovld __cnfn as_ulong2(uint3);
+ulong2 __ovld __cnfn as_ulong2(uint4);
+ulong2 __ovld __cnfn as_ulong2(long2);
+ulong2 __ovld __cnfn as_ulong2(ulong2);
+ulong2 __ovld __cnfn as_ulong2(float3);
+ulong2 __ovld __cnfn as_ulong2(float4);
+
+ulong3 __ovld __cnfn as_ulong3(short16);
+ulong3 __ovld __cnfn as_ulong3(ushort16);
+ulong3 __ovld __cnfn as_ulong3(int8);
+ulong3 __ovld __cnfn as_ulong3(uint8);
+ulong3 __ovld __cnfn as_ulong3(long3);
+ulong3 __ovld __cnfn as_ulong3(long4);
+ulong3 __ovld __cnfn as_ulong3(ulong3);
+ulong3 __ovld __cnfn as_ulong3(ulong4);
+ulong3 __ovld __cnfn as_ulong3(float8);
+
+ulong4 __ovld __cnfn as_ulong4(short16);
+ulong4 __ovld __cnfn as_ulong4(ushort16);
+ulong4 __ovld __cnfn as_ulong4(int8);
+ulong4 __ovld __cnfn as_ulong4(uint8);
+ulong4 __ovld __cnfn as_ulong4(long3);
+ulong4 __ovld __cnfn as_ulong4(long4);
+ulong4 __ovld __cnfn as_ulong4(ulong3);
+ulong4 __ovld __cnfn as_ulong4(ulong4);
+ulong4 __ovld __cnfn as_ulong4(float8);
+
+ulong8 __ovld __cnfn as_ulong8(int16);
+ulong8 __ovld __cnfn as_ulong8(uint16);
+ulong8 __ovld __cnfn as_ulong8(long8);
+ulong8 __ovld __cnfn as_ulong8(ulong8);
+ulong8 __ovld __cnfn as_ulong8(float16);
+
+ulong16 __ovld __cnfn as_ulong16(long16);
+ulong16 __ovld __cnfn as_ulong16(ulong16);
+
+float __ovld __cnfn as_float(char3);
+float __ovld __cnfn as_float(char4);
+float __ovld __cnfn as_float(uchar3);
+float __ovld __cnfn as_float(uchar4);
+float __ovld __cnfn as_float(short2);
+float __ovld __cnfn as_float(ushort2);
+float __ovld __cnfn as_float(int);
+float __ovld __cnfn as_float(uint);
+float __ovld __cnfn as_float(float);
+
+float2 __ovld __cnfn as_float2(char8);
+float2 __ovld __cnfn as_float2(uchar8);
+float2 __ovld __cnfn as_float2(short3);
+float2 __ovld __cnfn as_float2(short4);
+float2 __ovld __cnfn as_float2(ushort3);
+float2 __ovld __cnfn as_float2(ushort4);
+float2 __ovld __cnfn as_float2(int2);
+float2 __ovld __cnfn as_float2(uint2);
+float2 __ovld __cnfn as_float2(long);
+float2 __ovld __cnfn as_float2(ulong);
+float2 __ovld __cnfn as_float2(float2);
+
+float3 __ovld __cnfn as_float3(char16);
+float3 __ovld __cnfn as_float3(uchar16);
+float3 __ovld __cnfn as_float3(short8);
+float3 __ovld __cnfn as_float3(ushort8);
+float3 __ovld __cnfn as_float3(int3);
+float3 __ovld __cnfn as_float3(int4);
+float3 __ovld __cnfn as_float3(uint3);
+float3 __ovld __cnfn as_float3(uint4);
+float3 __ovld __cnfn as_float3(long2);
+float3 __ovld __cnfn as_float3(ulong2);
+float3 __ovld __cnfn as_float3(float3);
+float3 __ovld __cnfn as_float3(float4);
+
+float4 __ovld __cnfn as_float4(char16);
+float4 __ovld __cnfn as_float4(uchar16);
+float4 __ovld __cnfn as_float4(short8);
+float4 __ovld __cnfn as_float4(ushort8);
+float4 __ovld __cnfn as_float4(int3);
+float4 __ovld __cnfn as_float4(int4);
+float4 __ovld __cnfn as_float4(uint3);
+float4 __ovld __cnfn as_float4(uint4);
+float4 __ovld __cnfn as_float4(long2);
+float4 __ovld __cnfn as_float4(ulong2);
+float4 __ovld __cnfn as_float4(float3);
+float4 __ovld __cnfn as_float4(float4);
+
+float8 __ovld __cnfn as_float8(short16);
+float8 __ovld __cnfn as_float8(ushort16);
+float8 __ovld __cnfn as_float8(int8);
+float8 __ovld __cnfn as_float8(uint8);
+float8 __ovld __cnfn as_float8(long3);
+float8 __ovld __cnfn as_float8(long4);
+float8 __ovld __cnfn as_float8(ulong3);
+float8 __ovld __cnfn as_float8(ulong4);
+float8 __ovld __cnfn as_float8(float8);
+
+float16 __ovld __cnfn as_float16(int16);
+float16 __ovld __cnfn as_float16(uint16);
+float16 __ovld __cnfn as_float16(long8);
+float16 __ovld __cnfn as_float16(ulong8);
+float16 __ovld __cnfn as_float16(float16);
+
+#ifdef cl_khr_fp64
+char8 __ovld __cnfn as_char8(double);
+char16 __ovld __cnfn as_char16(double2);
+uchar8 __ovld __cnfn as_uchar8(double);
+uchar16 __ovld __cnfn as_uchar16(double2);
+short3 __ovld __cnfn as_short3(double);
+short4 __ovld __cnfn as_short4(double);
+short8 __ovld __cnfn as_short8(double2);
+short16 __ovld __cnfn as_short16(double3);
+short16 __ovld __cnfn as_short16(double4);
+ushort3 __ovld __cnfn as_ushort3(double);
+ushort4 __ovld __cnfn as_ushort4(double);
+ushort8 __ovld __cnfn as_ushort8(double2);
+ushort16 __ovld __cnfn as_ushort16(double3);
+ushort16 __ovld __cnfn as_ushort16(double4);
+int2 __ovld __cnfn as_int2(double);
+int3 __ovld __cnfn as_int3(double2);
+int4 __ovld __cnfn as_int4(double2);
+int8 __ovld __cnfn as_int8(double3);
+int8 __ovld __cnfn as_int8(double4);
+int16 __ovld __cnfn as_int16(double8);
+uint2 __ovld __cnfn as_uint2(double);
+uint3 __ovld __cnfn as_uint3(double2);
+uint4 __ovld __cnfn as_uint4(double2);
+uint8 __ovld __cnfn as_uint8(double3);
+uint8 __ovld __cnfn as_uint8(double4);
+uint16 __ovld __cnfn as_uint16(double8);
+long __ovld __cnfn as_long(double);
+long2 __ovld __cnfn as_long2(double2);
+long3 __ovld __cnfn as_long3(double3);
+long3 __ovld __cnfn as_long3(double4);
+long4 __ovld __cnfn as_long4(double3);
+long4 __ovld __cnfn as_long4(double4);
+long8 __ovld __cnfn as_long8(double8);
+long16 __ovld __cnfn as_long16(double16);
+ulong __ovld __cnfn as_ulong(double);
+ulong2 __ovld __cnfn as_ulong2(double2);
+ulong3 __ovld __cnfn as_ulong3(double3);
+ulong3 __ovld __cnfn as_ulong3(double4);
+ulong4 __ovld __cnfn as_ulong4(double3);
+ulong4 __ovld __cnfn as_ulong4(double4);
+ulong8 __ovld __cnfn as_ulong8(double8);
+ulong16 __ovld __cnfn as_ulong16(double16);
+float2 __ovld __cnfn as_float2(double);
+float3 __ovld __cnfn as_float3(double2);
+float4 __ovld __cnfn as_float4(double2);
+float8 __ovld __cnfn as_float8(double3);
+float8 __ovld __cnfn as_float8(double4);
+float16 __ovld __cnfn as_float16(double8);
+double __ovld __cnfn as_double(char8);
+double __ovld __cnfn as_double(uchar8);
+double __ovld __cnfn as_double(short3);
+double __ovld __cnfn as_double(short4);
+double __ovld __cnfn as_double(ushort3);
+double __ovld __cnfn as_double(ushort4);
+double __ovld __cnfn as_double(int2);
+double __ovld __cnfn as_double(uint2);
+double __ovld __cnfn as_double(long);
+double __ovld __cnfn as_double(ulong);
+double __ovld __cnfn as_double(float2);
+double __ovld __cnfn as_double(double);
+double2 __ovld __cnfn as_double2(char16);
+double2 __ovld __cnfn as_double2(uchar16);
+double2 __ovld __cnfn as_double2(short8);
+double2 __ovld __cnfn as_double2(ushort8);
+double2 __ovld __cnfn as_double2(int3);
+double2 __ovld __cnfn as_double2(int4);
+double2 __ovld __cnfn as_double2(uint3);
+double2 __ovld __cnfn as_double2(uint4);
+double2 __ovld __cnfn as_double2(long2);
+double2 __ovld __cnfn as_double2(ulong2);
+double2 __ovld __cnfn as_double2(float3);
+double2 __ovld __cnfn as_double2(float4);
+double2 __ovld __cnfn as_double2(double2);
+double3 __ovld __cnfn as_double3(short16);
+double3 __ovld __cnfn as_double3(ushort16);
+double3 __ovld __cnfn as_double3(int8);
+double3 __ovld __cnfn as_double3(uint8);
+double3 __ovld __cnfn as_double3(long3);
+double3 __ovld __cnfn as_double3(long4);
+double3 __ovld __cnfn as_double3(ulong3);
+double3 __ovld __cnfn as_double3(ulong4);
+double3 __ovld __cnfn as_double3(float8);
+double3 __ovld __cnfn as_double3(double3);
+double3 __ovld __cnfn as_double3(double4);
+double4 __ovld __cnfn as_double4(short16);
+double4 __ovld __cnfn as_double4(ushort16);
+double4 __ovld __cnfn as_double4(int8);
+double4 __ovld __cnfn as_double4(uint8);
+double4 __ovld __cnfn as_double4(long3);
+double4 __ovld __cnfn as_double4(long4);
+double4 __ovld __cnfn as_double4(ulong3);
+double4 __ovld __cnfn as_double4(ulong4);
+double4 __ovld __cnfn as_double4(float8);
+double4 __ovld __cnfn as_double4(double3);
+double4 __ovld __cnfn as_double4(double4);
+double8 __ovld __cnfn as_double8(int16);
+double8 __ovld __cnfn as_double8(uint16);
+double8 __ovld __cnfn as_double8(long8);
+double8 __ovld __cnfn as_double8(ulong8);
+double8 __ovld __cnfn as_double8(float16);
+double8 __ovld __cnfn as_double8(double8);
+double16 __ovld __cnfn as_double16(long16);
+double16 __ovld __cnfn as_double16(ulong16);
+double16 __ovld __cnfn as_double16(double16);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+char2 __ovld __cnfn as_char2(half);
+char3 __ovld __cnfn as_char3(half2);
+char4 __ovld __cnfn as_char4(half2);
+char8 __ovld __cnfn as_char8(half3);
+char8 __ovld __cnfn as_char8(half4);
+char16 __ovld __cnfn as_char16(half8);
+uchar2 __ovld __cnfn as_uchar2(half);
+uchar3 __ovld __cnfn as_uchar3(half2);
+uchar4 __ovld __cnfn as_uchar4(half2);
+uchar8 __ovld __cnfn as_uchar8(half3);
+uchar8 __ovld __cnfn as_uchar8(half4);
+uchar16 __ovld __cnfn as_uchar16(half8);
+short __ovld __cnfn as_short(half);
+short2 __ovld __cnfn as_short2(half2);
+short3 __ovld __cnfn as_short3(half3);
+short3 __ovld __cnfn as_short3(half4);
+short4 __ovld __cnfn as_short4(half3);
+short4 __ovld __cnfn as_short4(half4);
+short8 __ovld __cnfn as_short8(half8);
+short16 __ovld __cnfn as_short16(half16);
+ushort __ovld __cnfn as_ushort(half);
+ushort2 __ovld __cnfn as_ushort2(half2);
+ushort3 __ovld __cnfn as_ushort3(half3);
+ushort3 __ovld __cnfn as_ushort3(half4);
+ushort4 __ovld __cnfn as_ushort4(half3);
+ushort4 __ovld __cnfn as_ushort4(half4);
+ushort8 __ovld __cnfn as_ushort8(half8);
+ushort16 __ovld __cnfn as_ushort16(half16);
+int __ovld __cnfn as_int(half2);
+int2 __ovld __cnfn as_int2(half3);
+int2 __ovld __cnfn as_int2(half4);
+int3 __ovld __cnfn as_int3(half8);
+int4 __ovld __cnfn as_int4(half8);
+int8 __ovld __cnfn as_int8(half16);
+uint __ovld __cnfn as_uint(half2);
+uint2 __ovld __cnfn as_uint2(half3);
+uint2 __ovld __cnfn as_uint2(half4);
+uint3 __ovld __cnfn as_uint3(half8);
+uint4 __ovld __cnfn as_uint4(half8);
+uint8 __ovld __cnfn as_uint8(half16);
+long __ovld __cnfn as_long(half3);
+long __ovld __cnfn as_long(half4);
+long2 __ovld __cnfn as_long2(half8);
+long3 __ovld __cnfn as_long3(half16);
+long4 __ovld __cnfn as_long4(half16);
+ulong __ovld __cnfn as_ulong(half3);
+ulong __ovld __cnfn as_ulong(half4);
+ulong2 __ovld __cnfn as_ulong2(half8);
+ulong3 __ovld __cnfn as_ulong3(half16);
+ulong4 __ovld __cnfn as_ulong4(half16);
+half __ovld __cnfn as_half(char2);
+half __ovld __cnfn as_half(uchar2);
+half __ovld __cnfn as_half(short);
+half __ovld __cnfn as_half(ushort);
+half __ovld __cnfn as_half(half);
+half2 __ovld __cnfn as_half2(char3);
+half2 __ovld __cnfn as_half2(char4);
+half2 __ovld __cnfn as_half2(uchar3);
+half2 __ovld __cnfn as_half2(uchar4);
+half2 __ovld __cnfn as_half2(short2);
+half2 __ovld __cnfn as_half2(ushort2);
+half2 __ovld __cnfn as_half2(int);
+half2 __ovld __cnfn as_half2(uint);
+half2 __ovld __cnfn as_half2(half2);
+half2 __ovld __cnfn as_half2(float);
+half3 __ovld __cnfn as_half3(char8);
+half3 __ovld __cnfn as_half3(uchar8);
+half3 __ovld __cnfn as_half3(short3);
+half3 __ovld __cnfn as_half3(short4);
+half3 __ovld __cnfn as_half3(ushort3);
+half3 __ovld __cnfn as_half3(ushort4);
+half3 __ovld __cnfn as_half3(int2);
+half3 __ovld __cnfn as_half3(uint2);
+half3 __ovld __cnfn as_half3(long);
+half3 __ovld __cnfn as_half3(ulong);
+half3 __ovld __cnfn as_half3(half3);
+half3 __ovld __cnfn as_half3(half4);
+half3 __ovld __cnfn as_half3(float2);
+half4 __ovld __cnfn as_half4(char8);
+half4 __ovld __cnfn as_half4(uchar8);
+half4 __ovld __cnfn as_half4(short3);
+half4 __ovld __cnfn as_half4(short4);
+half4 __ovld __cnfn as_half4(ushort3);
+half4 __ovld __cnfn as_half4(ushort4);
+half4 __ovld __cnfn as_half4(int2);
+half4 __ovld __cnfn as_half4(uint2);
+half4 __ovld __cnfn as_half4(long);
+half4 __ovld __cnfn as_half4(ulong);
+half4 __ovld __cnfn as_half4(half3);
+half4 __ovld __cnfn as_half4(half4);
+half4 __ovld __cnfn as_half4(float2);
+half8 __ovld __cnfn as_half8(char16);
+half8 __ovld __cnfn as_half8(uchar16);
+half8 __ovld __cnfn as_half8(short8);
+half8 __ovld __cnfn as_half8(ushort8);
+half8 __ovld __cnfn as_half8(int3);
+half8 __ovld __cnfn as_half8(int4);
+half8 __ovld __cnfn as_half8(uint3);
+half8 __ovld __cnfn as_half8(uint4);
+half8 __ovld __cnfn as_half8(long2);
+half8 __ovld __cnfn as_half8(ulong2);
+half8 __ovld __cnfn as_half8(half8);
+half8 __ovld __cnfn as_half8(float3);
+half8 __ovld __cnfn as_half8(float4);
+half16 __ovld __cnfn as_half16(short16);
+half16 __ovld __cnfn as_half16(ushort16);
+half16 __ovld __cnfn as_half16(int8);
+half16 __ovld __cnfn as_half16(uint8);
+half16 __ovld __cnfn as_half16(long3);
+half16 __ovld __cnfn as_half16(long4);
+half16 __ovld __cnfn as_half16(ulong3);
+half16 __ovld __cnfn as_half16(ulong4);
+half16 __ovld __cnfn as_half16(half16);
+half16 __ovld __cnfn as_half16(float8);
+float __ovld __cnfn as_float(half2);
+float2 __ovld __cnfn as_float2(half3);
+float2 __ovld __cnfn as_float2(half4);
+float3 __ovld __cnfn as_float3(half8);
+float4 __ovld __cnfn as_float4(half8);
+float8 __ovld __cnfn as_float8(half16);
+
+#ifdef cl_khr_fp64
+half3 __ovld __cnfn as_half3(double);
+half4 __ovld __cnfn as_half4(double);
+half8 __ovld __cnfn as_half8(double2);
+half16 __ovld __cnfn as_half16(double3);
+half16 __ovld __cnfn as_half16(double4);
+double __ovld __cnfn as_double(half3);
+double __ovld __cnfn as_double(half4);
+double2 __ovld __cnfn as_double2(half8);
+double3 __ovld __cnfn as_double3(half16);
+double4 __ovld __cnfn as_double4(half16);
+#endif //cl_khr_fp64
+#endif //cl_khr_fp16
+
+// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers
+
+#define __kernel_exec(X, typen) __kernel \
+	__attribute__((work_group_size_hint(X, 1, 1))) \
+	__attribute__((vec_type_hint(typen)))
+
+#define kernel_exec(X, typen) __kernel \
+	__attribute__((work_group_size_hint(X, 1, 1))) \
+	__attribute__((vec_type_hint(typen)))
+
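+// Illustrative usage (not part of the upstream header): both macros attach a
+// work-group size hint and a vector type hint to a kernel definition. A
+// hypothetical kernel might be declared as:
+//
+//   __kernel_exec(64, float4) void scale(__global float4 *buf, float k) {
+//       buf[get_global_id(0)] *= k;
+//   }
+//
+// which expands to __kernel __attribute__((work_group_size_hint(64, 1, 1)))
+// __attribute__((vec_type_hint(float4))).
+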
+// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions
+
+/**
+ * Returns the number of dimensions in use. This is the
+ * value given to the work_dim argument specified in
+ * clEnqueueNDRangeKernel.
+ * For clEnqueueTask, this returns 1.
+ */
+uint __ovld __cnfn get_work_dim(void);
+
+/**
+ * Returns the number of global work-items specified for
+ * dimension identified by dimindx. This value is given by
+ * the global_work_size argument to
+ * clEnqueueNDRangeKernel. Valid values of dimindx
+ * are 0 to get_work_dim() - 1. For other values of
+ * dimindx, get_global_size() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_global_size(uint dimindx);
+
+/**
+ * Returns the unique global work-item ID value for
+ * dimension identified by dimindx. The global work-item
+ * ID specifies the work-item ID based on the number of
+ * global work-items specified to execute the kernel. Valid
+ * values of dimindx are 0 to get_work_dim() - 1. For
+ * other values of dimindx, get_global_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_global_id(uint dimindx);
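+
+// Illustrative usage (not part of the upstream header): a typical data-parallel
+// kernel uses get_global_id() as the element index, guarding against a global
+// size that was rounded up past the actual buffer length n. A hypothetical
+// example:
+//
+//   __kernel void saxpy(__global float *y, __global const float *x,
+//                       float a, uint n) {
+//       size_t i = get_global_id(0);
+//       if (i < n)
+//           y[i] = a * x[i] + y[i];
+//   }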
+
+/**
+ * Returns the number of local work-items specified in
+ * dimension identified by dimindx. This value is given by
+ * the local_work_size argument to
+ * clEnqueueNDRangeKernel if local_work_size is not
+ * NULL; otherwise the OpenCL implementation chooses
+ * an appropriate local_work_size value which is returned
+ * by this function. Valid values of dimindx are 0 to
+ * get_work_dim() - 1. For other values of dimindx,
+ * get_local_size() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_local_size(uint dimindx);
+
+/**
+ * Returns the unique local work-item ID, i.e. a work-item
+ * within a specific work-group for dimension identified by
+ * dimindx. Valid values of dimindx are 0 to
+ * get_work_dim() - 1. For other values of dimindx,
+ * get_local_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_local_id(uint dimindx);
+
+/**
+ * Returns the number of work-groups that will execute a
+ * kernel for dimension identified by dimindx.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values of dimindx, get_num_groups() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_num_groups(uint dimindx);
+
+/**
+ * get_group_id returns the work-group ID which is a
+ * number from 0 .. get_num_groups(dimindx) - 1.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values, get_group_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_group_id(uint dimindx);
+
+/**
+ * get_global_offset returns the offset values specified in
+ * the global_work_offset argument to
+ * clEnqueueNDRangeKernel.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values, get_global_offset() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_global_offset(uint dimindx);
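+
+// Illustrative note (not part of the upstream header): for uniform work-group
+// sizes (OpenCL 1.x semantics), these queries are related by
+//
+//   get_global_id(d) == get_group_id(d) * get_local_size(d)
+//                       + get_local_id(d) + get_global_offset(d)
+//
+// for each dimension d in [0, get_work_dim() - 1].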
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+size_t __ovld get_enqueued_local_size(uint dimindx);
+size_t __ovld get_global_linear_id(void);
+size_t __ovld get_local_linear_id(void);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions
+
+/**
+ * Arc cosine function.
+ */
+float __ovld __cnfn acos(float);
+float2 __ovld __cnfn acos(float2);
+float3 __ovld __cnfn acos(float3);
+float4 __ovld __cnfn acos(float4);
+float8 __ovld __cnfn acos(float8);
+float16 __ovld __cnfn acos(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acos(double);
+double2 __ovld __cnfn acos(double2);
+double3 __ovld __cnfn acos(double3);
+double4 __ovld __cnfn acos(double4);
+double8 __ovld __cnfn acos(double8);
+double16 __ovld __cnfn acos(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acos(half);
+half2 __ovld __cnfn acos(half2);
+half3 __ovld __cnfn acos(half3);
+half4 __ovld __cnfn acos(half4);
+half8 __ovld __cnfn acos(half8);
+half16 __ovld __cnfn acos(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Inverse hyperbolic cosine.
+ */
+float __ovld __cnfn acosh(float);
+float2 __ovld __cnfn acosh(float2);
+float3 __ovld __cnfn acosh(float3);
+float4 __ovld __cnfn acosh(float4);
+float8 __ovld __cnfn acosh(float8);
+float16 __ovld __cnfn acosh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acosh(double);
+double2 __ovld __cnfn acosh(double2);
+double3 __ovld __cnfn acosh(double3);
+double4 __ovld __cnfn acosh(double4);
+double8 __ovld __cnfn acosh(double8);
+double16 __ovld __cnfn acosh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acosh(half);
+half2 __ovld __cnfn acosh(half2);
+half3 __ovld __cnfn acosh(half3);
+half4 __ovld __cnfn acosh(half4);
+half8 __ovld __cnfn acosh(half8);
+half16 __ovld __cnfn acosh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute acos(x) / PI.
+ */
+float __ovld __cnfn acospi(float x);
+float2 __ovld __cnfn acospi(float2 x);
+float3 __ovld __cnfn acospi(float3 x);
+float4 __ovld __cnfn acospi(float4 x);
+float8 __ovld __cnfn acospi(float8 x);
+float16 __ovld __cnfn acospi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acospi(double x);
+double2 __ovld __cnfn acospi(double2 x);
+double3 __ovld __cnfn acospi(double3 x);
+double4 __ovld __cnfn acospi(double4 x);
+double8 __ovld __cnfn acospi(double8 x);
+double16 __ovld __cnfn acospi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acospi(half x);
+half2 __ovld __cnfn acospi(half2 x);
+half3 __ovld __cnfn acospi(half3 x);
+half4 __ovld __cnfn acospi(half4 x);
+half8 __ovld __cnfn acospi(half8 x);
+half16 __ovld __cnfn acospi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Arc sine function.
+ */
+float __ovld __cnfn asin(float);
+float2 __ovld __cnfn asin(float2);
+float3 __ovld __cnfn asin(float3);
+float4 __ovld __cnfn asin(float4);
+float8 __ovld __cnfn asin(float8);
+float16 __ovld __cnfn asin(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asin(double);
+double2 __ovld __cnfn asin(double2);
+double3 __ovld __cnfn asin(double3);
+double4 __ovld __cnfn asin(double4);
+double8 __ovld __cnfn asin(double8);
+double16 __ovld __cnfn asin(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asin(half);
+half2 __ovld __cnfn asin(half2);
+half3 __ovld __cnfn asin(half3);
+half4 __ovld __cnfn asin(half4);
+half8 __ovld __cnfn asin(half8);
+half16 __ovld __cnfn asin(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Inverse hyperbolic sine.
+ */
+float __ovld __cnfn asinh(float);
+float2 __ovld __cnfn asinh(float2);
+float3 __ovld __cnfn asinh(float3);
+float4 __ovld __cnfn asinh(float4);
+float8 __ovld __cnfn asinh(float8);
+float16 __ovld __cnfn asinh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asinh(double);
+double2 __ovld __cnfn asinh(double2);
+double3 __ovld __cnfn asinh(double3);
+double4 __ovld __cnfn asinh(double4);
+double8 __ovld __cnfn asinh(double8);
+double16 __ovld __cnfn asinh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asinh(half);
+half2 __ovld __cnfn asinh(half2);
+half3 __ovld __cnfn asinh(half3);
+half4 __ovld __cnfn asinh(half4);
+half8 __ovld __cnfn asinh(half8);
+half16 __ovld __cnfn asinh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute asin(x) / PI.
+ */
+float __ovld __cnfn asinpi(float x);
+float2 __ovld __cnfn asinpi(float2 x);
+float3 __ovld __cnfn asinpi(float3 x);
+float4 __ovld __cnfn asinpi(float4 x);
+float8 __ovld __cnfn asinpi(float8 x);
+float16 __ovld __cnfn asinpi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asinpi(double x);
+double2 __ovld __cnfn asinpi(double2 x);
+double3 __ovld __cnfn asinpi(double3 x);
+double4 __ovld __cnfn asinpi(double4 x);
+double8 __ovld __cnfn asinpi(double8 x);
+double16 __ovld __cnfn asinpi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asinpi(half x);
+half2 __ovld __cnfn asinpi(half2 x);
+half3 __ovld __cnfn asinpi(half3 x);
+half4 __ovld __cnfn asinpi(half4 x);
+half8 __ovld __cnfn asinpi(half8 x);
+half16 __ovld __cnfn asinpi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Arc tangent function.
+ */
+float __ovld __cnfn atan(float y_over_x);
+float2 __ovld __cnfn atan(float2 y_over_x);
+float3 __ovld __cnfn atan(float3 y_over_x);
+float4 __ovld __cnfn atan(float4 y_over_x);
+float8 __ovld __cnfn atan(float8 y_over_x);
+float16 __ovld __cnfn atan(float16 y_over_x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan(double y_over_x);
+double2 __ovld __cnfn atan(double2 y_over_x);
+double3 __ovld __cnfn atan(double3 y_over_x);
+double4 __ovld __cnfn atan(double4 y_over_x);
+double8 __ovld __cnfn atan(double8 y_over_x);
+double16 __ovld __cnfn atan(double16 y_over_x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan(half y_over_x);
+half2 __ovld __cnfn atan(half2 y_over_x);
+half3 __ovld __cnfn atan(half3 y_over_x);
+half4 __ovld __cnfn atan(half4 y_over_x);
+half8 __ovld __cnfn atan(half8 y_over_x);
+half16 __ovld __cnfn atan(half16 y_over_x);
+#endif //cl_khr_fp16
+
+/**
+ * Arc tangent of y / x.
+ */
+float __ovld __cnfn atan2(float y, float x);
+float2 __ovld __cnfn atan2(float2 y, float2 x);
+float3 __ovld __cnfn atan2(float3 y, float3 x);
+float4 __ovld __cnfn atan2(float4 y, float4 x);
+float8 __ovld __cnfn atan2(float8 y, float8 x);
+float16 __ovld __cnfn atan2(float16 y, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan2(double y, double x);
+double2 __ovld __cnfn atan2(double2 y, double2 x);
+double3 __ovld __cnfn atan2(double3 y, double3 x);
+double4 __ovld __cnfn atan2(double4 y, double4 x);
+double8 __ovld __cnfn atan2(double8 y, double8 x);
+double16 __ovld __cnfn atan2(double16 y, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan2(half y, half x);
+half2 __ovld __cnfn atan2(half2 y, half2 x);
+half3 __ovld __cnfn atan2(half3 y, half3 x);
+half4 __ovld __cnfn atan2(half4 y, half4 x);
+half8 __ovld __cnfn atan2(half8 y, half8 x);
+half16 __ovld __cnfn atan2(half16 y, half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Hyperbolic arc tangent.
+ */
+float __ovld __cnfn atanh(float);
+float2 __ovld __cnfn atanh(float2);
+float3 __ovld __cnfn atanh(float3);
+float4 __ovld __cnfn atanh(float4);
+float8 __ovld __cnfn atanh(float8);
+float16 __ovld __cnfn atanh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atanh(double);
+double2 __ovld __cnfn atanh(double2);
+double3 __ovld __cnfn atanh(double3);
+double4 __ovld __cnfn atanh(double4);
+double8 __ovld __cnfn atanh(double8);
+double16 __ovld __cnfn atanh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atanh(half);
+half2 __ovld __cnfn atanh(half2);
+half3 __ovld __cnfn atanh(half3);
+half4 __ovld __cnfn atanh(half4);
+half8 __ovld __cnfn atanh(half8);
+half16 __ovld __cnfn atanh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute atan(x) / PI.
+ */
+float __ovld __cnfn atanpi(float x);
+float2 __ovld __cnfn atanpi(float2 x);
+float3 __ovld __cnfn atanpi(float3 x);
+float4 __ovld __cnfn atanpi(float4 x);
+float8 __ovld __cnfn atanpi(float8 x);
+float16 __ovld __cnfn atanpi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atanpi(double x);
+double2 __ovld __cnfn atanpi(double2 x);
+double3 __ovld __cnfn atanpi(double3 x);
+double4 __ovld __cnfn atanpi(double4 x);
+double8 __ovld __cnfn atanpi(double8 x);
+double16 __ovld __cnfn atanpi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atanpi(half x);
+half2 __ovld __cnfn atanpi(half2 x);
+half3 __ovld __cnfn atanpi(half3 x);
+half4 __ovld __cnfn atanpi(half4 x);
+half8 __ovld __cnfn atanpi(half8 x);
+half16 __ovld __cnfn atanpi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute atan2(y, x) / PI.
+ */
+float __ovld __cnfn atan2pi(float y, float x);
+float2 __ovld __cnfn atan2pi(float2 y, float2 x);
+float3 __ovld __cnfn atan2pi(float3 y, float3 x);
+float4 __ovld __cnfn atan2pi(float4 y, float4 x);
+float8 __ovld __cnfn atan2pi(float8 y, float8 x);
+float16 __ovld __cnfn atan2pi(float16 y, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan2pi(double y, double x);
+double2 __ovld __cnfn atan2pi(double2 y, double2 x);
+double3 __ovld __cnfn atan2pi(double3 y, double3 x);
+double4 __ovld __cnfn atan2pi(double4 y, double4 x);
+double8 __ovld __cnfn atan2pi(double8 y, double8 x);
+double16 __ovld __cnfn atan2pi(double16 y, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan2pi(half y, half x);
+half2 __ovld __cnfn atan2pi(half2 y, half2 x);
+half3 __ovld __cnfn atan2pi(half3 y, half3 x);
+half4 __ovld __cnfn atan2pi(half4 y, half4 x);
+half8 __ovld __cnfn atan2pi(half8 y, half8 x);
+half16 __ovld __cnfn atan2pi(half16 y, half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cube-root.
+ */
+float __ovld __cnfn cbrt(float);
+float2 __ovld __cnfn cbrt(float2);
+float3 __ovld __cnfn cbrt(float3);
+float4 __ovld __cnfn cbrt(float4);
+float8 __ovld __cnfn cbrt(float8);
+float16 __ovld __cnfn cbrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cbrt(double);
+double2 __ovld __cnfn cbrt(double2);
+double3 __ovld __cnfn cbrt(double3);
+double4 __ovld __cnfn cbrt(double4);
+double8 __ovld __cnfn cbrt(double8);
+double16 __ovld __cnfn cbrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cbrt(half);
+half2 __ovld __cnfn cbrt(half2);
+half3 __ovld __cnfn cbrt(half3);
+half4 __ovld __cnfn cbrt(half4);
+half8 __ovld __cnfn cbrt(half8);
+half16 __ovld __cnfn cbrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to positive
+ * infinity rounding mode.
+ */
+float __ovld __cnfn ceil(float);
+float2 __ovld __cnfn ceil(float2);
+float3 __ovld __cnfn ceil(float3);
+float4 __ovld __cnfn ceil(float4);
+float8 __ovld __cnfn ceil(float8);
+float16 __ovld __cnfn ceil(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn ceil(double);
+double2 __ovld __cnfn ceil(double2);
+double3 __ovld __cnfn ceil(double3);
+double4 __ovld __cnfn ceil(double4);
+double8 __ovld __cnfn ceil(double8);
+double16 __ovld __cnfn ceil(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn ceil(half);
+half2 __ovld __cnfn ceil(half2);
+half3 __ovld __cnfn ceil(half3);
+half4 __ovld __cnfn ceil(half4);
+half8 __ovld __cnfn ceil(half8);
+half16 __ovld __cnfn ceil(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns x with its sign changed to match the sign of y.
+ */
+float __ovld __cnfn copysign(float x, float y);
+float2 __ovld __cnfn copysign(float2 x, float2 y);
+float3 __ovld __cnfn copysign(float3 x, float3 y);
+float4 __ovld __cnfn copysign(float4 x, float4 y);
+float8 __ovld __cnfn copysign(float8 x, float8 y);
+float16 __ovld __cnfn copysign(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn copysign(double x, double y);
+double2 __ovld __cnfn copysign(double2 x, double2 y);
+double3 __ovld __cnfn copysign(double3 x, double3 y);
+double4 __ovld __cnfn copysign(double4 x, double4 y);
+double8 __ovld __cnfn copysign(double8 x, double8 y);
+double16 __ovld __cnfn copysign(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn copysign(half x, half y);
+half2 __ovld __cnfn copysign(half2 x, half2 y);
+half3 __ovld __cnfn copysign(half3 x, half3 y);
+half4 __ovld __cnfn copysign(half4 x, half4 y);
+half8 __ovld __cnfn copysign(half8 x, half8 y);
+half16 __ovld __cnfn copysign(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cosine.
+ */
+float __ovld __cnfn cos(float);
+float2 __ovld __cnfn cos(float2);
+float3 __ovld __cnfn cos(float3);
+float4 __ovld __cnfn cos(float4);
+float8 __ovld __cnfn cos(float8);
+float16 __ovld __cnfn cos(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cos(double);
+double2 __ovld __cnfn cos(double2);
+double3 __ovld __cnfn cos(double3);
+double4 __ovld __cnfn cos(double4);
+double8 __ovld __cnfn cos(double8);
+double16 __ovld __cnfn cos(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cos(half);
+half2 __ovld __cnfn cos(half2);
+half3 __ovld __cnfn cos(half3);
+half4 __ovld __cnfn cos(half4);
+half8 __ovld __cnfn cos(half8);
+half16 __ovld __cnfn cos(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute hyperbolic cosine.
+ */
+float __ovld __cnfn cosh(float);
+float2 __ovld __cnfn cosh(float2);
+float3 __ovld __cnfn cosh(float3);
+float4 __ovld __cnfn cosh(float4);
+float8 __ovld __cnfn cosh(float8);
+float16 __ovld __cnfn cosh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cosh(double);
+double2 __ovld __cnfn cosh(double2);
+double3 __ovld __cnfn cosh(double3);
+double4 __ovld __cnfn cosh(double4);
+double8 __ovld __cnfn cosh(double8);
+double16 __ovld __cnfn cosh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cosh(half);
+half2 __ovld __cnfn cosh(half2);
+half3 __ovld __cnfn cosh(half3);
+half4 __ovld __cnfn cosh(half4);
+half8 __ovld __cnfn cosh(half8);
+half16 __ovld __cnfn cosh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cos(PI * x).
+ */
+float __ovld __cnfn cospi(float x);
+float2 __ovld __cnfn cospi(float2 x);
+float3 __ovld __cnfn cospi(float3 x);
+float4 __ovld __cnfn cospi(float4 x);
+float8 __ovld __cnfn cospi(float8 x);
+float16 __ovld __cnfn cospi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cospi(double x);
+double2 __ovld __cnfn cospi(double2 x);
+double3 __ovld __cnfn cospi(double3 x);
+double4 __ovld __cnfn cospi(double4 x);
+double8 __ovld __cnfn cospi(double8 x);
+double16 __ovld __cnfn cospi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cospi(half x);
+half2 __ovld __cnfn cospi(half2 x);
+half3 __ovld __cnfn cospi(half3 x);
+half4 __ovld __cnfn cospi(half4 x);
+half8 __ovld __cnfn cospi(half8 x);
+half16 __ovld __cnfn cospi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Complementary error function.
+ */
+float __ovld __cnfn erfc(float);
+float2 __ovld __cnfn erfc(float2);
+float3 __ovld __cnfn erfc(float3);
+float4 __ovld __cnfn erfc(float4);
+float8 __ovld __cnfn erfc(float8);
+float16 __ovld __cnfn erfc(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn erfc(double);
+double2 __ovld __cnfn erfc(double2);
+double3 __ovld __cnfn erfc(double3);
+double4 __ovld __cnfn erfc(double4);
+double8 __ovld __cnfn erfc(double8);
+double16 __ovld __cnfn erfc(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn erfc(half);
+half2 __ovld __cnfn erfc(half2);
+half3 __ovld __cnfn erfc(half3);
+half4 __ovld __cnfn erfc(half4);
+half8 __ovld __cnfn erfc(half8);
+half16 __ovld __cnfn erfc(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Error function encountered in integrating the
+ * normal distribution.
+ */
+float __ovld __cnfn erf(float);
+float2 __ovld __cnfn erf(float2);
+float3 __ovld __cnfn erf(float3);
+float4 __ovld __cnfn erf(float4);
+float8 __ovld __cnfn erf(float8);
+float16 __ovld __cnfn erf(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn erf(double);
+double2 __ovld __cnfn erf(double2);
+double3 __ovld __cnfn erf(double3);
+double4 __ovld __cnfn erf(double4);
+double8 __ovld __cnfn erf(double8);
+double16 __ovld __cnfn erf(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn erf(half);
+half2 __ovld __cnfn erf(half2);
+half3 __ovld __cnfn erf(half3);
+half4 __ovld __cnfn erf(half4);
+half8 __ovld __cnfn erf(half8);
+half16 __ovld __cnfn erf(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the base e exponential function of x.
+ */
+float __ovld __cnfn exp(float x);
+float2 __ovld __cnfn exp(float2 x);
+float3 __ovld __cnfn exp(float3 x);
+float4 __ovld __cnfn exp(float4 x);
+float8 __ovld __cnfn exp(float8 x);
+float16 __ovld __cnfn exp(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp(double x);
+double2 __ovld __cnfn exp(double2 x);
+double3 __ovld __cnfn exp(double3 x);
+double4 __ovld __cnfn exp(double4 x);
+double8 __ovld __cnfn exp(double8 x);
+double16 __ovld __cnfn exp(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp(half x);
+half2 __ovld __cnfn exp(half2 x);
+half3 __ovld __cnfn exp(half3 x);
+half4 __ovld __cnfn exp(half4 x);
+half8 __ovld __cnfn exp(half8 x);
+half16 __ovld __cnfn exp(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Exponential base 2 function.
+ */
+float __ovld __cnfn exp2(float);
+float2 __ovld __cnfn exp2(float2);
+float3 __ovld __cnfn exp2(float3);
+float4 __ovld __cnfn exp2(float4);
+float8 __ovld __cnfn exp2(float8);
+float16 __ovld __cnfn exp2(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp2(double);
+double2 __ovld __cnfn exp2(double2);
+double3 __ovld __cnfn exp2(double3);
+double4 __ovld __cnfn exp2(double4);
+double8 __ovld __cnfn exp2(double8);
+double16 __ovld __cnfn exp2(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp2(half);
+half2 __ovld __cnfn exp2(half2);
+half3 __ovld __cnfn exp2(half3);
+half4 __ovld __cnfn exp2(half4);
+half8 __ovld __cnfn exp2(half8);
+half16 __ovld __cnfn exp2(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Exponential base 10 function.
+ */
+float __ovld __cnfn exp10(float);
+float2 __ovld __cnfn exp10(float2);
+float3 __ovld __cnfn exp10(float3);
+float4 __ovld __cnfn exp10(float4);
+float8 __ovld __cnfn exp10(float8);
+float16 __ovld __cnfn exp10(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp10(double);
+double2 __ovld __cnfn exp10(double2);
+double3 __ovld __cnfn exp10(double3);
+double4 __ovld __cnfn exp10(double4);
+double8 __ovld __cnfn exp10(double8);
+double16 __ovld __cnfn exp10(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp10(half);
+half2 __ovld __cnfn exp10(half2);
+half3 __ovld __cnfn exp10(half3);
+half4 __ovld __cnfn exp10(half4);
+half8 __ovld __cnfn exp10(half8);
+half16 __ovld __cnfn exp10(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute e^x - 1.0.
+ */
+float __ovld __cnfn expm1(float x);
+float2 __ovld __cnfn expm1(float2 x);
+float3 __ovld __cnfn expm1(float3 x);
+float4 __ovld __cnfn expm1(float4 x);
+float8 __ovld __cnfn expm1(float8 x);
+float16 __ovld __cnfn expm1(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn expm1(double x);
+double2 __ovld __cnfn expm1(double2 x);
+double3 __ovld __cnfn expm1(double3 x);
+double4 __ovld __cnfn expm1(double4 x);
+double8 __ovld __cnfn expm1(double8 x);
+double16 __ovld __cnfn expm1(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn expm1(half x);
+half2 __ovld __cnfn expm1(half2 x);
+half3 __ovld __cnfn expm1(half3 x);
+half4 __ovld __cnfn expm1(half4 x);
+half8 __ovld __cnfn expm1(half8 x);
+half16 __ovld __cnfn expm1(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute absolute value of a floating-point number.
+ */
+float __ovld __cnfn fabs(float);
+float2 __ovld __cnfn fabs(float2);
+float3 __ovld __cnfn fabs(float3);
+float4 __ovld __cnfn fabs(float4);
+float8 __ovld __cnfn fabs(float8);
+float16 __ovld __cnfn fabs(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fabs(double);
+double2 __ovld __cnfn fabs(double2);
+double3 __ovld __cnfn fabs(double3);
+double4 __ovld __cnfn fabs(double4);
+double8 __ovld __cnfn fabs(double8);
+double16 __ovld __cnfn fabs(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fabs(half);
+half2 __ovld __cnfn fabs(half2);
+half3 __ovld __cnfn fabs(half3);
+half4 __ovld __cnfn fabs(half4);
+half8 __ovld __cnfn fabs(half8);
+half16 __ovld __cnfn fabs(half16);
+#endif //cl_khr_fp16
+
+/**
+ * x - y if x > y, +0 if x is less than or equal to y.
+ */
+float __ovld __cnfn fdim(float x, float y);
+float2 __ovld __cnfn fdim(float2 x, float2 y);
+float3 __ovld __cnfn fdim(float3 x, float3 y);
+float4 __ovld __cnfn fdim(float4 x, float4 y);
+float8 __ovld __cnfn fdim(float8 x, float8 y);
+float16 __ovld __cnfn fdim(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fdim(double x, double y);
+double2 __ovld __cnfn fdim(double2 x, double2 y);
+double3 __ovld __cnfn fdim(double3 x, double3 y);
+double4 __ovld __cnfn fdim(double4 x, double4 y);
+double8 __ovld __cnfn fdim(double8 x, double8 y);
+double16 __ovld __cnfn fdim(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fdim(half x, half y);
+half2 __ovld __cnfn fdim(half2 x, half2 y);
+half3 __ovld __cnfn fdim(half3 x, half3 y);
+half4 __ovld __cnfn fdim(half4 x, half4 y);
+half8 __ovld __cnfn fdim(half8 x, half8 y);
+half16 __ovld __cnfn fdim(half16 x, half16 y);
+#endif //cl_khr_fp16
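+
+// Illustrative note (not part of the upstream header): fdim() is the positive
+// difference, e.g. fdim(5.0f, 3.0f) == 2.0f while fdim(3.0f, 5.0f) == +0.0f;
+// it differs from fabs(x - y), which would give 2.0f in both cases.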
+
+/**
+ * Round to integral value using the round to negative
+ * infinity rounding mode.
+ */
+float __ovld __cnfn floor(float);
+float2 __ovld __cnfn floor(float2);
+float3 __ovld __cnfn floor(float3);
+float4 __ovld __cnfn floor(float4);
+float8 __ovld __cnfn floor(float8);
+float16 __ovld __cnfn floor(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn floor(double);
+double2 __ovld __cnfn floor(double2);
+double3 __ovld __cnfn floor(double3);
+double4 __ovld __cnfn floor(double4);
+double8 __ovld __cnfn floor(double8);
+double16 __ovld __cnfn floor(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn floor(half);
+half2 __ovld __cnfn floor(half2);
+half3 __ovld __cnfn floor(half3);
+half4 __ovld __cnfn floor(half4);
+half8 __ovld __cnfn floor(half8);
+half16 __ovld __cnfn floor(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the correctly rounded floating-point
+ * representation of the sum of c with the infinitely
+ * precise product of a and b. Rounding of
+ * intermediate products shall not occur. Edge case
+ * behavior is per the IEEE 754-2008 standard.
+ */
+float __ovld __cnfn fma(float a, float b, float c);
+float2 __ovld __cnfn fma(float2 a, float2 b, float2 c);
+float3 __ovld __cnfn fma(float3 a, float3 b, float3 c);
+float4 __ovld __cnfn fma(float4 a, float4 b, float4 c);
+float8 __ovld __cnfn fma(float8 a, float8 b, float8 c);
+float16 __ovld __cnfn fma(float16 a, float16 b, float16 c);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fma(double a, double b, double c);
+double2 __ovld __cnfn fma(double2 a, double2 b, double2 c);
+double3 __ovld __cnfn fma(double3 a, double3 b, double3 c);
+double4 __ovld __cnfn fma(double4 a, double4 b, double4 c);
+double8 __ovld __cnfn fma(double8 a, double8 b, double8 c);
+double16 __ovld __cnfn fma(double16 a, double16 b, double16 c);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fma(half a, half b, half c);
+half2 __ovld __cnfn fma(half2 a, half2 b, half2 c);
+half3 __ovld __cnfn fma(half3 a, half3 b, half3 c);
+half4 __ovld __cnfn fma(half4 a, half4 b, half4 c);
+half8 __ovld __cnfn fma(half8 a, half8 b, half8 c);
+half16 __ovld __cnfn fma(half16 a, half16 b, half16 c);
+#endif //cl_khr_fp16
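+
+// Illustrative note (not part of the upstream header): fma(a, b, c) rounds only
+// once, after the exact product a * b has been added to c, so it can preserve
+// bits that the separate expression a * b + c loses. Given floats a and b, a
+// common use is recovering the rounding error of a product (exact barring
+// overflow or underflow):
+//
+//   float prod = a * b;
+//   float err  = fma(a, b, -prod);   // rounding error of a * b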
+
+/**
+ * Returns y if x < y, otherwise it returns x. If one
+ * argument is a NaN, fmax() returns the other
+ * argument. If both arguments are NaNs, fmax()
+ * returns a NaN.
+ */
+float __ovld __cnfn fmax(float x, float y);
+float2 __ovld __cnfn fmax(float2 x, float2 y);
+float3 __ovld __cnfn fmax(float3 x, float3 y);
+float4 __ovld __cnfn fmax(float4 x, float4 y);
+float8 __ovld __cnfn fmax(float8 x, float8 y);
+float16 __ovld __cnfn fmax(float16 x, float16 y);
+float2 __ovld __cnfn fmax(float2 x, float y);
+float3 __ovld __cnfn fmax(float3 x, float y);
+float4 __ovld __cnfn fmax(float4 x, float y);
+float8 __ovld __cnfn fmax(float8 x, float y);
+float16 __ovld __cnfn fmax(float16 x, float y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmax(double x, double y);
+double2 __ovld __cnfn fmax(double2 x, double2 y);
+double3 __ovld __cnfn fmax(double3 x, double3 y);
+double4 __ovld __cnfn fmax(double4 x, double4 y);
+double8 __ovld __cnfn fmax(double8 x, double8 y);
+double16 __ovld __cnfn fmax(double16 x, double16 y);
+double2 __ovld __cnfn fmax(double2 x, double y);
+double3 __ovld __cnfn fmax(double3 x, double y);
+double4 __ovld __cnfn fmax(double4 x, double y);
+double8 __ovld __cnfn fmax(double8 x, double y);
+double16 __ovld __cnfn fmax(double16 x, double y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmax(half x, half y);
+half2 __ovld __cnfn fmax(half2 x, half2 y);
+half3 __ovld __cnfn fmax(half3 x, half3 y);
+half4 __ovld __cnfn fmax(half4 x, half4 y);
+half8 __ovld __cnfn fmax(half8 x, half8 y);
+half16 __ovld __cnfn fmax(half16 x, half16 y);
+half2 __ovld __cnfn fmax(half2 x, half y);
+half3 __ovld __cnfn fmax(half3 x, half y);
+half4 __ovld __cnfn fmax(half4 x, half y);
+half8 __ovld __cnfn fmax(half8 x, half y);
+half16 __ovld __cnfn fmax(half16 x, half y);
+#endif //cl_khr_fp16
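+
+// Illustrative note (not part of the upstream header): unlike the C expression
+// (x > y ? x : y), fmax() ignores a single NaN argument, e.g.
+// fmax(NAN, 2.0f) == 2.0f and fmax(2.0f, NAN) == 2.0f, while fmax(NAN, NAN)
+// is NaN. fmin() below mirrors this behavior.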
+
+/**
+ * Returns y if y < x, otherwise it returns x. If one
+ * argument is a NaN, fmin() returns the other
+ * argument. If both arguments are NaNs, fmin()
+ * returns a NaN.
+ */
+float __ovld __cnfn fmin(float x, float y);
+float2 __ovld __cnfn fmin(float2 x, float2 y);
+float3 __ovld __cnfn fmin(float3 x, float3 y);
+float4 __ovld __cnfn fmin(float4 x, float4 y);
+float8 __ovld __cnfn fmin(float8 x, float8 y);
+float16 __ovld __cnfn fmin(float16 x, float16 y);
+float2 __ovld __cnfn fmin(float2 x, float y);
+float3 __ovld __cnfn fmin(float3 x, float y);
+float4 __ovld __cnfn fmin(float4 x, float y);
+float8 __ovld __cnfn fmin(float8 x, float y);
+float16 __ovld __cnfn fmin(float16 x, float y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmin(double x, double y);
+double2 __ovld __cnfn fmin(double2 x, double2 y);
+double3 __ovld __cnfn fmin(double3 x, double3 y);
+double4 __ovld __cnfn fmin(double4 x, double4 y);
+double8 __ovld __cnfn fmin(double8 x, double8 y);
+double16 __ovld __cnfn fmin(double16 x, double16 y);
+double2 __ovld __cnfn fmin(double2 x, double y);
+double3 __ovld __cnfn fmin(double3 x, double y);
+double4 __ovld __cnfn fmin(double4 x, double y);
+double8 __ovld __cnfn fmin(double8 x, double y);
+double16 __ovld __cnfn fmin(double16 x, double y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmin(half x, half y);
+half2 __ovld __cnfn fmin(half2 x, half2 y);
+half3 __ovld __cnfn fmin(half3 x, half3 y);
+half4 __ovld __cnfn fmin(half4 x, half4 y);
+half8 __ovld __cnfn fmin(half8 x, half8 y);
+half16 __ovld __cnfn fmin(half16 x, half16 y);
+half2 __ovld __cnfn fmin(half2 x, half y);
+half3 __ovld __cnfn fmin(half3 x, half y);
+half4 __ovld __cnfn fmin(half4 x, half y);
+half8 __ovld __cnfn fmin(half8 x, half y);
+half16 __ovld __cnfn fmin(half16 x, half y);
+#endif //cl_khr_fp16
+
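+/*
+ * Example (illustrative, not normative): fmax and fmin ignore a single NaN
+ * operand, per the descriptions above:
+ *
+ *   float q = nan(0u);
+ *   float a = fmax(q, 1.0f);   // a == 1.0f
+ *   float b = fmin(q, 1.0f);   // b == 1.0f
+ *   float c = fmax(q, q);      // c is a NaN
+ */
+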
+/**
+ * Modulus. Returns x - y * trunc (x/y).
+ */
+float __ovld __cnfn fmod(float x, float y);
+float2 __ovld __cnfn fmod(float2 x, float2 y);
+float3 __ovld __cnfn fmod(float3 x, float3 y);
+float4 __ovld __cnfn fmod(float4 x, float4 y);
+float8 __ovld __cnfn fmod(float8 x, float8 y);
+float16 __ovld __cnfn fmod(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmod(double x, double y);
+double2 __ovld __cnfn fmod(double2 x, double2 y);
+double3 __ovld __cnfn fmod(double3 x, double3 y);
+double4 __ovld __cnfn fmod(double4 x, double4 y);
+double8 __ovld __cnfn fmod(double8 x, double8 y);
+double16 __ovld __cnfn fmod(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmod(half x, half y);
+half2 __ovld __cnfn fmod(half2 x, half2 y);
+half3 __ovld __cnfn fmod(half3 x, half3 y);
+half4 __ovld __cnfn fmod(half4 x, half4 y);
+half8 __ovld __cnfn fmod(half8 x, half8 y);
+half16 __ovld __cnfn fmod(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns fmin(x - floor(x), 0x1.fffffep-1f).
+ * floor(x) is returned in iptr.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld fract(float x, float *iptr);
+float2 __ovld fract(float2 x, float2 *iptr);
+float3 __ovld fract(float3 x, float3 *iptr);
+float4 __ovld fract(float4 x, float4 *iptr);
+float8 __ovld fract(float8 x, float8 *iptr);
+float16 __ovld fract(float16 x, float16 *iptr);
+#ifdef cl_khr_fp64
+double __ovld fract(double x, double *iptr);
+double2 __ovld fract(double2 x, double2 *iptr);
+double3 __ovld fract(double3 x, double3 *iptr);
+double4 __ovld fract(double4 x, double4 *iptr);
+double8 __ovld fract(double8 x, double8 *iptr);
+double16 __ovld fract(double16 x, double16 *iptr);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld fract(half x, half *iptr);
+half2 __ovld fract(half2 x, half2 *iptr);
+half3 __ovld fract(half3 x, half3 *iptr);
+half4 __ovld fract(half4 x, half4 *iptr);
+half8 __ovld fract(half8 x, half8 *iptr);
+half16 __ovld fract(half16 x, half16 *iptr);
+#endif //cl_khr_fp16
+#else
+float __ovld fract(float x, __global float *iptr);
+float2 __ovld fract(float2 x, __global float2 *iptr);
+float3 __ovld fract(float3 x, __global float3 *iptr);
+float4 __ovld fract(float4 x, __global float4 *iptr);
+float8 __ovld fract(float8 x, __global float8 *iptr);
+float16 __ovld fract(float16 x, __global float16 *iptr);
+float __ovld fract(float x, __local float *iptr);
+float2 __ovld fract(float2 x, __local float2 *iptr);
+float3 __ovld fract(float3 x, __local float3 *iptr);
+float4 __ovld fract(float4 x, __local float4 *iptr);
+float8 __ovld fract(float8 x, __local float8 *iptr);
+float16 __ovld fract(float16 x, __local float16 *iptr);
+float __ovld fract(float x, __private float *iptr);
+float2 __ovld fract(float2 x, __private float2 *iptr);
+float3 __ovld fract(float3 x, __private float3 *iptr);
+float4 __ovld fract(float4 x, __private float4 *iptr);
+float8 __ovld fract(float8 x, __private float8 *iptr);
+float16 __ovld fract(float16 x, __private float16 *iptr);
+#ifdef cl_khr_fp64
+double __ovld fract(double x, __global double *iptr);
+double2 __ovld fract(double2 x, __global double2 *iptr);
+double3 __ovld fract(double3 x, __global double3 *iptr);
+double4 __ovld fract(double4 x, __global double4 *iptr);
+double8 __ovld fract(double8 x, __global double8 *iptr);
+double16 __ovld fract(double16 x, __global double16 *iptr);
+double __ovld fract(double x, __local double *iptr);
+double2 __ovld fract(double2 x, __local double2 *iptr);
+double3 __ovld fract(double3 x, __local double3 *iptr);
+double4 __ovld fract(double4 x, __local double4 *iptr);
+double8 __ovld fract(double8 x, __local double8 *iptr);
+double16 __ovld fract(double16 x, __local double16 *iptr);
+double __ovld fract(double x, __private double *iptr);
+double2 __ovld fract(double2 x, __private double2 *iptr);
+double3 __ovld fract(double3 x, __private double3 *iptr);
+double4 __ovld fract(double4 x, __private double4 *iptr);
+double8 __ovld fract(double8 x, __private double8 *iptr);
+double16 __ovld fract(double16 x, __private double16 *iptr);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld fract(half x, __global half *iptr);
+half2 __ovld fract(half2 x, __global half2 *iptr);
+half3 __ovld fract(half3 x, __global half3 *iptr);
+half4 __ovld fract(half4 x, __global half4 *iptr);
+half8 __ovld fract(half8 x, __global half8 *iptr);
+half16 __ovld fract(half16 x, __global half16 *iptr);
+half __ovld fract(half x, __local half *iptr);
+half2 __ovld fract(half2 x, __local half2 *iptr);
+half3 __ovld fract(half3 x, __local half3 *iptr);
+half4 __ovld fract(half4 x, __local half4 *iptr);
+half8 __ovld fract(half8 x, __local half8 *iptr);
+half16 __ovld fract(half16 x, __local half16 *iptr);
+half __ovld fract(half x, __private half *iptr);
+half2 __ovld fract(half2 x, __private half2 *iptr);
+half3 __ovld fract(half3 x, __private half3 *iptr);
+half4 __ovld fract(half4 x, __private half4 *iptr);
+half8 __ovld fract(half8 x, __private half8 *iptr);
+half16 __ovld fract(half16 x, __private half16 *iptr);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
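+/*
+ * Example (illustrative, not normative): fract splits a value into its
+ * fractional part and its floor, with the conventions described above:
+ *
+ *   float ip;
+ *   float f = fract(2.75f, &ip);    // f == 0.75f, ip == 2.0f
+ *   float g = fract(-0.25f, &ip);   // g == 0.75f, ip == -1.0f
+ */
+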
+/**
+ * Extract mantissa and exponent from x. For each
+ * component the mantissa returned is a float with
+ * magnitude in the interval [1/2, 1) or 0. Each
+ * component of x equals mantissa returned * 2^exp.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld frexp(float x, int *exp);
+float2 __ovld frexp(float2 x, int2 *exp);
+float3 __ovld frexp(float3 x, int3 *exp);
+float4 __ovld frexp(float4 x, int4 *exp);
+float8 __ovld frexp(float8 x, int8 *exp);
+float16 __ovld frexp(float16 x, int16 *exp);
+#ifdef cl_khr_fp64
+double __ovld frexp(double x, int *exp);
+double2 __ovld frexp(double2 x, int2 *exp);
+double3 __ovld frexp(double3 x, int3 *exp);
+double4 __ovld frexp(double4 x, int4 *exp);
+double8 __ovld frexp(double8 x, int8 *exp);
+double16 __ovld frexp(double16 x, int16 *exp);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld frexp(half x, int *exp);
+half2 __ovld frexp(half2 x, int2 *exp);
+half3 __ovld frexp(half3 x, int3 *exp);
+half4 __ovld frexp(half4 x, int4 *exp);
+half8 __ovld frexp(half8 x, int8 *exp);
+half16 __ovld frexp(half16 x, int16 *exp);
+#endif //cl_khr_fp16
+#else
+float __ovld frexp(float x, __global int *exp);
+float2 __ovld frexp(float2 x, __global int2 *exp);
+float3 __ovld frexp(float3 x, __global int3 *exp);
+float4 __ovld frexp(float4 x, __global int4 *exp);
+float8 __ovld frexp(float8 x, __global int8 *exp);
+float16 __ovld frexp(float16 x, __global int16 *exp);
+float __ovld frexp(float x, __local int *exp);
+float2 __ovld frexp(float2 x, __local int2 *exp);
+float3 __ovld frexp(float3 x, __local int3 *exp);
+float4 __ovld frexp(float4 x, __local int4 *exp);
+float8 __ovld frexp(float8 x, __local int8 *exp);
+float16 __ovld frexp(float16 x, __local int16 *exp);
+float __ovld frexp(float x, __private int *exp);
+float2 __ovld frexp(float2 x, __private int2 *exp);
+float3 __ovld frexp(float3 x, __private int3 *exp);
+float4 __ovld frexp(float4 x, __private int4 *exp);
+float8 __ovld frexp(float8 x, __private int8 *exp);
+float16 __ovld frexp(float16 x, __private int16 *exp);
+#ifdef cl_khr_fp64
+double __ovld frexp(double x, __global int *exp);
+double2 __ovld frexp(double2 x, __global int2 *exp);
+double3 __ovld frexp(double3 x, __global int3 *exp);
+double4 __ovld frexp(double4 x, __global int4 *exp);
+double8 __ovld frexp(double8 x, __global int8 *exp);
+double16 __ovld frexp(double16 x, __global int16 *exp);
+double __ovld frexp(double x, __local int *exp);
+double2 __ovld frexp(double2 x, __local int2 *exp);
+double3 __ovld frexp(double3 x, __local int3 *exp);
+double4 __ovld frexp(double4 x, __local int4 *exp);
+double8 __ovld frexp(double8 x, __local int8 *exp);
+double16 __ovld frexp(double16 x, __local int16 *exp);
+double __ovld frexp(double x, __private int *exp);
+double2 __ovld frexp(double2 x, __private int2 *exp);
+double3 __ovld frexp(double3 x, __private int3 *exp);
+double4 __ovld frexp(double4 x, __private int4 *exp);
+double8 __ovld frexp(double8 x, __private int8 *exp);
+double16 __ovld frexp(double16 x, __private int16 *exp);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld frexp(half x, __global int *exp);
+half2 __ovld frexp(half2 x, __global int2 *exp);
+half3 __ovld frexp(half3 x, __global int3 *exp);
+half4 __ovld frexp(half4 x, __global int4 *exp);
+half8 __ovld frexp(half8 x, __global int8 *exp);
+half16 __ovld frexp(half16 x, __global int16 *exp);
+half __ovld frexp(half x, __local int *exp);
+half2 __ovld frexp(half2 x, __local int2 *exp);
+half3 __ovld frexp(half3 x, __local int3 *exp);
+half4 __ovld frexp(half4 x, __local int4 *exp);
+half8 __ovld frexp(half8 x, __local int8 *exp);
+half16 __ovld frexp(half16 x, __local int16 *exp);
+half __ovld frexp(half x, __private int *exp);
+half2 __ovld frexp(half2 x, __private int2 *exp);
+half3 __ovld frexp(half3 x, __private int3 *exp);
+half4 __ovld frexp(half4 x, __private int4 *exp);
+half8 __ovld frexp(half8 x, __private int8 *exp);
+half16 __ovld frexp(half16 x, __private int16 *exp);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
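+/*
+ * Example (illustrative, not normative): frexp returns a mantissa with
+ * magnitude in [1/2, 1) and stores the corresponding exponent:
+ *
+ *   int e;
+ *   float m = frexp(8.0f, &e);   // m == 0.5f, e == 4, since 0.5f * 2^4 == 8.0f
+ */
+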
+/**
+ * Compute the value of the square root of x^2 + y^2
+ * without undue overflow or underflow.
+ */
+float __ovld __cnfn hypot(float x, float y);
+float2 __ovld __cnfn hypot(float2 x, float2 y);
+float3 __ovld __cnfn hypot(float3 x, float3 y);
+float4 __ovld __cnfn hypot(float4 x, float4 y);
+float8 __ovld __cnfn hypot(float8 x, float8 y);
+float16 __ovld __cnfn hypot(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn hypot(double x, double y);
+double2 __ovld __cnfn hypot(double2 x, double2 y);
+double3 __ovld __cnfn hypot(double3 x, double3 y);
+double4 __ovld __cnfn hypot(double4 x, double4 y);
+double8 __ovld __cnfn hypot(double8 x, double8 y);
+double16 __ovld __cnfn hypot(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn hypot(half x, half y);
+half2 __ovld __cnfn hypot(half2 x, half2 y);
+half3 __ovld __cnfn hypot(half3 x, half3 y);
+half4 __ovld __cnfn hypot(half4 x, half4 y);
+half8 __ovld __cnfn hypot(half8 x, half8 y);
+half16 __ovld __cnfn hypot(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Return the exponent as an integer value.
+ */
+int __ovld __cnfn ilogb(float x);
+int2 __ovld __cnfn ilogb(float2 x);
+int3 __ovld __cnfn ilogb(float3 x);
+int4 __ovld __cnfn ilogb(float4 x);
+int8 __ovld __cnfn ilogb(float8 x);
+int16 __ovld __cnfn ilogb(float16 x);
+#ifdef cl_khr_fp64
+int __ovld __cnfn ilogb(double x);
+int2 __ovld __cnfn ilogb(double2 x);
+int3 __ovld __cnfn ilogb(double3 x);
+int4 __ovld __cnfn ilogb(double4 x);
+int8 __ovld __cnfn ilogb(double8 x);
+int16 __ovld __cnfn ilogb(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn ilogb(half x);
+int2 __ovld __cnfn ilogb(half2 x);
+int3 __ovld __cnfn ilogb(half3 x);
+int4 __ovld __cnfn ilogb(half4 x);
+int8 __ovld __cnfn ilogb(half8 x);
+int16 __ovld __cnfn ilogb(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Multiply x by 2 to the power n.
+ */
+float __ovld __cnfn ldexp(float x, int n);
+float2 __ovld __cnfn ldexp(float2 x, int2 n);
+float3 __ovld __cnfn ldexp(float3 x, int3 n);
+float4 __ovld __cnfn ldexp(float4 x, int4 n);
+float8 __ovld __cnfn ldexp(float8 x, int8 n);
+float16 __ovld __cnfn ldexp(float16 x, int16 n);
+float2 __ovld __cnfn ldexp(float2 x, int n);
+float3 __ovld __cnfn ldexp(float3 x, int n);
+float4 __ovld __cnfn ldexp(float4 x, int n);
+float8 __ovld __cnfn ldexp(float8 x, int n);
+float16 __ovld __cnfn ldexp(float16 x, int n);
+#ifdef cl_khr_fp64
+double __ovld __cnfn ldexp(double x, int n);
+double2 __ovld __cnfn ldexp(double2 x, int2 n);
+double3 __ovld __cnfn ldexp(double3 x, int3 n);
+double4 __ovld __cnfn ldexp(double4 x, int4 n);
+double8 __ovld __cnfn ldexp(double8 x, int8 n);
+double16 __ovld __cnfn ldexp(double16 x, int16 n);
+double2 __ovld __cnfn ldexp(double2 x, int n);
+double3 __ovld __cnfn ldexp(double3 x, int n);
+double4 __ovld __cnfn ldexp(double4 x, int n);
+double8 __ovld __cnfn ldexp(double8 x, int n);
+double16 __ovld __cnfn ldexp(double16 x, int n);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn ldexp(half x, int n);
+half2 __ovld __cnfn ldexp(half2 x, int2 n);
+half3 __ovld __cnfn ldexp(half3 x, int3 n);
+half4 __ovld __cnfn ldexp(half4 x, int4 n);
+half8 __ovld __cnfn ldexp(half8 x, int8 n);
+half16 __ovld __cnfn ldexp(half16 x, int16 n);
+half2 __ovld __cnfn ldexp(half2 x, int n);
+half3 __ovld __cnfn ldexp(half3 x, int n);
+half4 __ovld __cnfn ldexp(half4 x, int n);
+half8 __ovld __cnfn ldexp(half8 x, int n);
+half16 __ovld __cnfn ldexp(half16 x, int n);
+#endif //cl_khr_fp16
+
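+/*
+ * Example (illustrative, not normative): ldexp is the inverse of the frexp
+ * decomposition shown earlier:
+ *
+ *   float v = ldexp(0.5f, 4);   // v == 8.0f
+ */
+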
+/**
+ * Log gamma function. Returns the natural
+ * logarithm of the absolute value of the gamma
+ * function. The sign of the gamma function is
+ * returned in the signp argument of lgamma_r.
+ */
+float __ovld __cnfn lgamma(float x);
+float2 __ovld __cnfn lgamma(float2 x);
+float3 __ovld __cnfn lgamma(float3 x);
+float4 __ovld __cnfn lgamma(float4 x);
+float8 __ovld __cnfn lgamma(float8 x);
+float16 __ovld __cnfn lgamma(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn lgamma(double x);
+double2 __ovld __cnfn lgamma(double2 x);
+double3 __ovld __cnfn lgamma(double3 x);
+double4 __ovld __cnfn lgamma(double4 x);
+double8 __ovld __cnfn lgamma(double8 x);
+double16 __ovld __cnfn lgamma(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn lgamma(half x);
+half2 __ovld __cnfn lgamma(half2 x);
+half3 __ovld __cnfn lgamma(half3 x);
+half4 __ovld __cnfn lgamma(half4 x);
+half8 __ovld __cnfn lgamma(half8 x);
+half16 __ovld __cnfn lgamma(half16 x);
+#endif //cl_khr_fp16
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld lgamma_r(float x, int *signp);
+float2 __ovld lgamma_r(float2 x, int2 *signp);
+float3 __ovld lgamma_r(float3 x, int3 *signp);
+float4 __ovld lgamma_r(float4 x, int4 *signp);
+float8 __ovld lgamma_r(float8 x, int8 *signp);
+float16 __ovld lgamma_r(float16 x, int16 *signp);
+#ifdef cl_khr_fp64
+double __ovld lgamma_r(double x, int *signp);
+double2 __ovld lgamma_r(double2 x, int2 *signp);
+double3 __ovld lgamma_r(double3 x, int3 *signp);
+double4 __ovld lgamma_r(double4 x, int4 *signp);
+double8 __ovld lgamma_r(double8 x, int8 *signp);
+double16 __ovld lgamma_r(double16 x, int16 *signp);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld lgamma_r(half x, int *signp);
+half2 __ovld lgamma_r(half2 x, int2 *signp);
+half3 __ovld lgamma_r(half3 x, int3 *signp);
+half4 __ovld lgamma_r(half4 x, int4 *signp);
+half8 __ovld lgamma_r(half8 x, int8 *signp);
+half16 __ovld lgamma_r(half16 x, int16 *signp);
+#endif //cl_khr_fp16
+#else
+float __ovld lgamma_r(float x, __global int *signp);
+float2 __ovld lgamma_r(float2 x, __global int2 *signp);
+float3 __ovld lgamma_r(float3 x, __global int3 *signp);
+float4 __ovld lgamma_r(float4 x, __global int4 *signp);
+float8 __ovld lgamma_r(float8 x, __global int8 *signp);
+float16 __ovld lgamma_r(float16 x, __global int16 *signp);
+float __ovld lgamma_r(float x, __local int *signp);
+float2 __ovld lgamma_r(float2 x, __local int2 *signp);
+float3 __ovld lgamma_r(float3 x, __local int3 *signp);
+float4 __ovld lgamma_r(float4 x, __local int4 *signp);
+float8 __ovld lgamma_r(float8 x, __local int8 *signp);
+float16 __ovld lgamma_r(float16 x, __local int16 *signp);
+float __ovld lgamma_r(float x, __private int *signp);
+float2 __ovld lgamma_r(float2 x, __private int2 *signp);
+float3 __ovld lgamma_r(float3 x, __private int3 *signp);
+float4 __ovld lgamma_r(float4 x, __private int4 *signp);
+float8 __ovld lgamma_r(float8 x, __private int8 *signp);
+float16 __ovld lgamma_r(float16 x, __private int16 *signp);
+#ifdef cl_khr_fp64
+double __ovld lgamma_r(double x, __global int *signp);
+double2 __ovld lgamma_r(double2 x, __global int2 *signp);
+double3 __ovld lgamma_r(double3 x, __global int3 *signp);
+double4 __ovld lgamma_r(double4 x, __global int4 *signp);
+double8 __ovld lgamma_r(double8 x, __global int8 *signp);
+double16 __ovld lgamma_r(double16 x, __global int16 *signp);
+double __ovld lgamma_r(double x, __local int *signp);
+double2 __ovld lgamma_r(double2 x, __local int2 *signp);
+double3 __ovld lgamma_r(double3 x, __local int3 *signp);
+double4 __ovld lgamma_r(double4 x, __local int4 *signp);
+double8 __ovld lgamma_r(double8 x, __local int8 *signp);
+double16 __ovld lgamma_r(double16 x, __local int16 *signp);
+double __ovld lgamma_r(double x, __private int *signp);
+double2 __ovld lgamma_r(double2 x, __private int2 *signp);
+double3 __ovld lgamma_r(double3 x, __private int3 *signp);
+double4 __ovld lgamma_r(double4 x, __private int4 *signp);
+double8 __ovld lgamma_r(double8 x, __private int8 *signp);
+double16 __ovld lgamma_r(double16 x, __private int16 *signp);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld lgamma_r(half x, __global int *signp);
+half2 __ovld lgamma_r(half2 x, __global int2 *signp);
+half3 __ovld lgamma_r(half3 x, __global int3 *signp);
+half4 __ovld lgamma_r(half4 x, __global int4 *signp);
+half8 __ovld lgamma_r(half8 x, __global int8 *signp);
+half16 __ovld lgamma_r(half16 x, __global int16 *signp);
+half __ovld lgamma_r(half x, __local int *signp);
+half2 __ovld lgamma_r(half2 x, __local int2 *signp);
+half3 __ovld lgamma_r(half3 x, __local int3 *signp);
+half4 __ovld lgamma_r(half4 x, __local int4 *signp);
+half8 __ovld lgamma_r(half8 x, __local int8 *signp);
+half16 __ovld lgamma_r(half16 x, __local int16 *signp);
+half __ovld lgamma_r(half x, __private int *signp);
+half2 __ovld lgamma_r(half2 x, __private int2 *signp);
+half3 __ovld lgamma_r(half3 x, __private int3 *signp);
+half4 __ovld lgamma_r(half4 x, __private int4 *signp);
+half8 __ovld lgamma_r(half8 x, __private int8 *signp);
+half16 __ovld lgamma_r(half16 x, __private int16 *signp);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Compute natural logarithm.
+ */
+float __ovld __cnfn log(float);
+float2 __ovld __cnfn log(float2);
+float3 __ovld __cnfn log(float3);
+float4 __ovld __cnfn log(float4);
+float8 __ovld __cnfn log(float8);
+float16 __ovld __cnfn log(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log(double);
+double2 __ovld __cnfn log(double2);
+double3 __ovld __cnfn log(double3);
+double4 __ovld __cnfn log(double4);
+double8 __ovld __cnfn log(double8);
+double16 __ovld __cnfn log(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log(half);
+half2 __ovld __cnfn log(half2);
+half3 __ovld __cnfn log(half3);
+half4 __ovld __cnfn log(half4);
+half8 __ovld __cnfn log(half8);
+half16 __ovld __cnfn log(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base 2 logarithm.
+ */
+float __ovld __cnfn log2(float);
+float2 __ovld __cnfn log2(float2);
+float3 __ovld __cnfn log2(float3);
+float4 __ovld __cnfn log2(float4);
+float8 __ovld __cnfn log2(float8);
+float16 __ovld __cnfn log2(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log2(double);
+double2 __ovld __cnfn log2(double2);
+double3 __ovld __cnfn log2(double3);
+double4 __ovld __cnfn log2(double4);
+double8 __ovld __cnfn log2(double8);
+double16 __ovld __cnfn log2(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log2(half);
+half2 __ovld __cnfn log2(half2);
+half3 __ovld __cnfn log2(half3);
+half4 __ovld __cnfn log2(half4);
+half8 __ovld __cnfn log2(half8);
+half16 __ovld __cnfn log2(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base 10 logarithm.
+ */
+float __ovld __cnfn log10(float);
+float2 __ovld __cnfn log10(float2);
+float3 __ovld __cnfn log10(float3);
+float4 __ovld __cnfn log10(float4);
+float8 __ovld __cnfn log10(float8);
+float16 __ovld __cnfn log10(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log10(double);
+double2 __ovld __cnfn log10(double2);
+double3 __ovld __cnfn log10(double3);
+double4 __ovld __cnfn log10(double4);
+double8 __ovld __cnfn log10(double8);
+double16 __ovld __cnfn log10(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log10(half);
+half2 __ovld __cnfn log10(half2);
+half3 __ovld __cnfn log10(half3);
+half4 __ovld __cnfn log10(half4);
+half8 __ovld __cnfn log10(half8);
+half16 __ovld __cnfn log10(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base e logarithm of (1.0 + x).
+ */
+float __ovld __cnfn log1p(float x);
+float2 __ovld __cnfn log1p(float2 x);
+float3 __ovld __cnfn log1p(float3 x);
+float4 __ovld __cnfn log1p(float4 x);
+float8 __ovld __cnfn log1p(float8 x);
+float16 __ovld __cnfn log1p(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log1p(double x);
+double2 __ovld __cnfn log1p(double2 x);
+double3 __ovld __cnfn log1p(double3 x);
+double4 __ovld __cnfn log1p(double4 x);
+double8 __ovld __cnfn log1p(double8 x);
+double16 __ovld __cnfn log1p(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log1p(half x);
+half2 __ovld __cnfn log1p(half2 x);
+half3 __ovld __cnfn log1p(half3 x);
+half4 __ovld __cnfn log1p(half4 x);
+half8 __ovld __cnfn log1p(half8 x);
+half16 __ovld __cnfn log1p(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the exponent of x, which is the integral
+ * part of log_r |x|, where r is the radix of the
+ * floating-point format (2 for these types).
+ */
+float __ovld __cnfn logb(float x);
+float2 __ovld __cnfn logb(float2 x);
+float3 __ovld __cnfn logb(float3 x);
+float4 __ovld __cnfn logb(float4 x);
+float8 __ovld __cnfn logb(float8 x);
+float16 __ovld __cnfn logb(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn logb(double x);
+double2 __ovld __cnfn logb(double2 x);
+double3 __ovld __cnfn logb(double3 x);
+double4 __ovld __cnfn logb(double4 x);
+double8 __ovld __cnfn logb(double8 x);
+double16 __ovld __cnfn logb(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn logb(half x);
+half2 __ovld __cnfn logb(half2 x);
+half3 __ovld __cnfn logb(half3 x);
+half4 __ovld __cnfn logb(half4 x);
+half8 __ovld __cnfn logb(half8 x);
+half16 __ovld __cnfn logb(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * mad approximates a * b + c. Whether or how the
+ * product of a * b is rounded and how supernormal or
+ * subnormal intermediate products are handled is not
+ * defined. mad is intended to be used where speed is
+ * preferred over accuracy.
+ */
+float __ovld __cnfn mad(float a, float b, float c);
+float2 __ovld __cnfn mad(float2 a, float2 b, float2 c);
+float3 __ovld __cnfn mad(float3 a, float3 b, float3 c);
+float4 __ovld __cnfn mad(float4 a, float4 b, float4 c);
+float8 __ovld __cnfn mad(float8 a, float8 b, float8 c);
+float16 __ovld __cnfn mad(float16 a, float16 b, float16 c);
+#ifdef cl_khr_fp64
+double __ovld __cnfn mad(double a, double b, double c);
+double2 __ovld __cnfn mad(double2 a, double2 b, double2 c);
+double3 __ovld __cnfn mad(double3 a, double3 b, double3 c);
+double4 __ovld __cnfn mad(double4 a, double4 b, double4 c);
+double8 __ovld __cnfn mad(double8 a, double8 b, double8 c);
+double16 __ovld __cnfn mad(double16 a, double16 b, double16 c);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn mad(half a, half b, half c);
+half2 __ovld __cnfn mad(half2 a, half2 b, half2 c);
+half3 __ovld __cnfn mad(half3 a, half3 b, half3 c);
+half4 __ovld __cnfn mad(half4 a, half4 b, half4 c);
+half8 __ovld __cnfn mad(half8 a, half8 b, half8 c);
+half16 __ovld __cnfn mad(half16 a, half16 b, half16 c);
+#endif //cl_khr_fp16
+
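+/*
+ * Example (illustrative, not normative): mad is commonly used for Horner
+ * evaluation of polynomials where throughput matters more than the exact
+ * rounding guarantees of fma; c0, c1 and c2 below are arbitrary
+ * coefficients:
+ *
+ *   float r = mad(x, mad(x, c2, c1), c0);   // approximately c2*x*x + c1*x + c0
+ */
+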
+/**
+ * Returns x if | x | > | y |, y if | y | > | x |, otherwise
+ * fmax(x, y).
+ */
+float __ovld __cnfn maxmag(float x, float y);
+float2 __ovld __cnfn maxmag(float2 x, float2 y);
+float3 __ovld __cnfn maxmag(float3 x, float3 y);
+float4 __ovld __cnfn maxmag(float4 x, float4 y);
+float8 __ovld __cnfn maxmag(float8 x, float8 y);
+float16 __ovld __cnfn maxmag(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn maxmag(double x, double y);
+double2 __ovld __cnfn maxmag(double2 x, double2 y);
+double3 __ovld __cnfn maxmag(double3 x, double3 y);
+double4 __ovld __cnfn maxmag(double4 x, double4 y);
+double8 __ovld __cnfn maxmag(double8 x, double8 y);
+double16 __ovld __cnfn maxmag(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn maxmag(half x, half y);
+half2 __ovld __cnfn maxmag(half2 x, half2 y);
+half3 __ovld __cnfn maxmag(half3 x, half3 y);
+half4 __ovld __cnfn maxmag(half4 x, half4 y);
+half8 __ovld __cnfn maxmag(half8 x, half8 y);
+half16 __ovld __cnfn maxmag(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns x if | x | < | y |, y if | y | < | x |, otherwise
+ * fmin(x, y).
+ */
+float __ovld __cnfn minmag(float x, float y);
+float2 __ovld __cnfn minmag(float2 x, float2 y);
+float3 __ovld __cnfn minmag(float3 x, float3 y);
+float4 __ovld __cnfn minmag(float4 x, float4 y);
+float8 __ovld __cnfn minmag(float8 x, float8 y);
+float16 __ovld __cnfn minmag(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn minmag(double x, double y);
+double2 __ovld __cnfn minmag(double2 x, double2 y);
+double3 __ovld __cnfn minmag(double3 x, double3 y);
+double4 __ovld __cnfn minmag(double4 x, double4 y);
+double8 __ovld __cnfn minmag(double8 x, double8 y);
+double16 __ovld __cnfn minmag(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn minmag(half x, half y);
+half2 __ovld __cnfn minmag(half2 x, half2 y);
+half3 __ovld __cnfn minmag(half3 x, half3 y);
+half4 __ovld __cnfn minmag(half4 x, half4 y);
+half8 __ovld __cnfn minmag(half8 x, half8 y);
+half16 __ovld __cnfn minmag(half16 x, half16 y);
+#endif //cl_khr_fp16
+
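+/*
+ * Example (illustrative, not normative): maxmag and minmag select by
+ * magnitude rather than by value:
+ *
+ *   float a = maxmag(-3.0f, 2.0f);   // a == -3.0f  (|-3| > |2|)
+ *   float b = minmag(-3.0f, 2.0f);   // b == 2.0f
+ *   float c = maxmag(-2.0f, 2.0f);   // c == 2.0f   (equal magnitudes -> fmax)
+ */
+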
+/**
+ * Decompose a floating-point number. The modf
+ * function breaks the argument x into integral and
+ * fractional parts, each of which has the same sign as
+ * the argument. It stores the integral part in the object
+ * pointed to by iptr.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld modf(float x, float *iptr);
+float2 __ovld modf(float2 x, float2 *iptr);
+float3 __ovld modf(float3 x, float3 *iptr);
+float4 __ovld modf(float4 x, float4 *iptr);
+float8 __ovld modf(float8 x, float8 *iptr);
+float16 __ovld modf(float16 x, float16 *iptr);
+#ifdef cl_khr_fp64
+double __ovld modf(double x, double *iptr);
+double2 __ovld modf(double2 x, double2 *iptr);
+double3 __ovld modf(double3 x, double3 *iptr);
+double4 __ovld modf(double4 x, double4 *iptr);
+double8 __ovld modf(double8 x, double8 *iptr);
+double16 __ovld modf(double16 x, double16 *iptr);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld modf(half x, half *iptr);
+half2 __ovld modf(half2 x, half2 *iptr);
+half3 __ovld modf(half3 x, half3 *iptr);
+half4 __ovld modf(half4 x, half4 *iptr);
+half8 __ovld modf(half8 x, half8 *iptr);
+half16 __ovld modf(half16 x, half16 *iptr);
+#endif //cl_khr_fp16
+#else
+float __ovld modf(float x, __global float *iptr);
+float2 __ovld modf(float2 x, __global float2 *iptr);
+float3 __ovld modf(float3 x, __global float3 *iptr);
+float4 __ovld modf(float4 x, __global float4 *iptr);
+float8 __ovld modf(float8 x, __global float8 *iptr);
+float16 __ovld modf(float16 x, __global float16 *iptr);
+float __ovld modf(float x, __local float *iptr);
+float2 __ovld modf(float2 x, __local float2 *iptr);
+float3 __ovld modf(float3 x, __local float3 *iptr);
+float4 __ovld modf(float4 x, __local float4 *iptr);
+float8 __ovld modf(float8 x, __local float8 *iptr);
+float16 __ovld modf(float16 x, __local float16 *iptr);
+float __ovld modf(float x, __private float *iptr);
+float2 __ovld modf(float2 x, __private float2 *iptr);
+float3 __ovld modf(float3 x, __private float3 *iptr);
+float4 __ovld modf(float4 x, __private float4 *iptr);
+float8 __ovld modf(float8 x, __private float8 *iptr);
+float16 __ovld modf(float16 x, __private float16 *iptr);
+#ifdef cl_khr_fp64
+double __ovld modf(double x, __global double *iptr);
+double2 __ovld modf(double2 x, __global double2 *iptr);
+double3 __ovld modf(double3 x, __global double3 *iptr);
+double4 __ovld modf(double4 x, __global double4 *iptr);
+double8 __ovld modf(double8 x, __global double8 *iptr);
+double16 __ovld modf(double16 x, __global double16 *iptr);
+double __ovld modf(double x, __local double *iptr);
+double2 __ovld modf(double2 x, __local double2 *iptr);
+double3 __ovld modf(double3 x, __local double3 *iptr);
+double4 __ovld modf(double4 x, __local double4 *iptr);
+double8 __ovld modf(double8 x, __local double8 *iptr);
+double16 __ovld modf(double16 x, __local double16 *iptr);
+double __ovld modf(double x, __private double *iptr);
+double2 __ovld modf(double2 x, __private double2 *iptr);
+double3 __ovld modf(double3 x, __private double3 *iptr);
+double4 __ovld modf(double4 x, __private double4 *iptr);
+double8 __ovld modf(double8 x, __private double8 *iptr);
+double16 __ovld modf(double16 x, __private double16 *iptr);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld modf(half x, __global half *iptr);
+half2 __ovld modf(half2 x, __global half2 *iptr);
+half3 __ovld modf(half3 x, __global half3 *iptr);
+half4 __ovld modf(half4 x, __global half4 *iptr);
+half8 __ovld modf(half8 x, __global half8 *iptr);
+half16 __ovld modf(half16 x, __global half16 *iptr);
+half __ovld modf(half x, __local half *iptr);
+half2 __ovld modf(half2 x, __local half2 *iptr);
+half3 __ovld modf(half3 x, __local half3 *iptr);
+half4 __ovld modf(half4 x, __local half4 *iptr);
+half8 __ovld modf(half8 x, __local half8 *iptr);
+half16 __ovld modf(half16 x, __local half16 *iptr);
+half __ovld modf(half x, __private half *iptr);
+half2 __ovld modf(half2 x, __private half2 *iptr);
+half3 __ovld modf(half3 x, __private half3 *iptr);
+half4 __ovld modf(half4 x, __private half4 *iptr);
+half8 __ovld modf(half8 x, __private half8 *iptr);
+half16 __ovld modf(half16 x, __private half16 *iptr);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
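+/*
+ * Example (illustrative, not normative): both parts produced by modf keep
+ * the sign of the argument:
+ *
+ *   float ip;
+ *   float f = modf(-3.75f, &ip);   // f == -0.75f, ip == -3.0f
+ */
+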
+/**
+ * Returns a quiet NaN. The nancode may be placed
+ * in the significand of the resulting NaN.
+ */
+float __ovld __cnfn nan(uint nancode);
+float2 __ovld __cnfn nan(uint2 nancode);
+float3 __ovld __cnfn nan(uint3 nancode);
+float4 __ovld __cnfn nan(uint4 nancode);
+float8 __ovld __cnfn nan(uint8 nancode);
+float16 __ovld __cnfn nan(uint16 nancode);
+#ifdef cl_khr_fp64
+double __ovld __cnfn nan(ulong nancode);
+double2 __ovld __cnfn nan(ulong2 nancode);
+double3 __ovld __cnfn nan(ulong3 nancode);
+double4 __ovld __cnfn nan(ulong4 nancode);
+double8 __ovld __cnfn nan(ulong8 nancode);
+double16 __ovld __cnfn nan(ulong16 nancode);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn nan(ushort nancode);
+half2 __ovld __cnfn nan(ushort2 nancode);
+half3 __ovld __cnfn nan(ushort3 nancode);
+half4 __ovld __cnfn nan(ushort4 nancode);
+half8 __ovld __cnfn nan(ushort8 nancode);
+half16 __ovld __cnfn nan(ushort16 nancode);
+#endif //cl_khr_fp16
+
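+/*
+ * Example (illustrative, not normative): nan produces a quiet NaN that can
+ * be detected with isnan; whether the nancode is preserved in the
+ * significand is implementation-dependent:
+ *
+ *   float q = nan(0u);
+ *   int ok = isnan(q);   // ok != 0
+ */
+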
+/**
+ * Computes the next representable single-precision
+ * floating-point value following x in the direction of
+ * y. Thus, if y is less than x, nextafter() returns the
+ * largest representable floating-point number less
+ * than x.
+ */
+float __ovld __cnfn nextafter(float x, float y);
+float2 __ovld __cnfn nextafter(float2 x, float2 y);
+float3 __ovld __cnfn nextafter(float3 x, float3 y);
+float4 __ovld __cnfn nextafter(float4 x, float4 y);
+float8 __ovld __cnfn nextafter(float8 x, float8 y);
+float16 __ovld __cnfn nextafter(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn nextafter(double x, double y);
+double2 __ovld __cnfn nextafter(double2 x, double2 y);
+double3 __ovld __cnfn nextafter(double3 x, double3 y);
+double4 __ovld __cnfn nextafter(double4 x, double4 y);
+double8 __ovld __cnfn nextafter(double8 x, double8 y);
+double16 __ovld __cnfn nextafter(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn nextafter(half x, half y);
+half2 __ovld __cnfn nextafter(half2 x, half2 y);
+half3 __ovld __cnfn nextafter(half3 x, half3 y);
+half4 __ovld __cnfn nextafter(half4 x, half4 y);
+half8 __ovld __cnfn nextafter(half8 x, half8 y);
+half16 __ovld __cnfn nextafter(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power y.
+ */
+float __ovld __cnfn pow(float x, float y);
+float2 __ovld __cnfn pow(float2 x, float2 y);
+float3 __ovld __cnfn pow(float3 x, float3 y);
+float4 __ovld __cnfn pow(float4 x, float4 y);
+float8 __ovld __cnfn pow(float8 x, float8 y);
+float16 __ovld __cnfn pow(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn pow(double x, double y);
+double2 __ovld __cnfn pow(double2 x, double2 y);
+double3 __ovld __cnfn pow(double3 x, double3 y);
+double4 __ovld __cnfn pow(double4 x, double4 y);
+double8 __ovld __cnfn pow(double8 x, double8 y);
+double16 __ovld __cnfn pow(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn pow(half x, half y);
+half2 __ovld __cnfn pow(half2 x, half2 y);
+half3 __ovld __cnfn pow(half3 x, half3 y);
+half4 __ovld __cnfn pow(half4 x, half4 y);
+half8 __ovld __cnfn pow(half8 x, half8 y);
+half16 __ovld __cnfn pow(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power y, where y is an integer.
+ */
+float __ovld __cnfn pown(float x, int y);
+float2 __ovld __cnfn pown(float2 x, int2 y);
+float3 __ovld __cnfn pown(float3 x, int3 y);
+float4 __ovld __cnfn pown(float4 x, int4 y);
+float8 __ovld __cnfn pown(float8 x, int8 y);
+float16 __ovld __cnfn pown(float16 x, int16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn pown(double x, int y);
+double2 __ovld __cnfn pown(double2 x, int2 y);
+double3 __ovld __cnfn pown(double3 x, int3 y);
+double4 __ovld __cnfn pown(double4 x, int4 y);
+double8 __ovld __cnfn pown(double8 x, int8 y);
+double16 __ovld __cnfn pown(double16 x, int16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn pown(half x, int y);
+half2 __ovld __cnfn pown(half2 x, int2 y);
+half3 __ovld __cnfn pown(half3 x, int3 y);
+half4 __ovld __cnfn pown(half4 x, int4 y);
+half8 __ovld __cnfn pown(half8 x, int8 y);
+half16 __ovld __cnfn pown(half16 x, int16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power y, where x is >= 0.
+ */
+float __ovld __cnfn powr(float x, float y);
+float2 __ovld __cnfn powr(float2 x, float2 y);
+float3 __ovld __cnfn powr(float3 x, float3 y);
+float4 __ovld __cnfn powr(float4 x, float4 y);
+float8 __ovld __cnfn powr(float8 x, float8 y);
+float16 __ovld __cnfn powr(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn powr(double x, double y);
+double2 __ovld __cnfn powr(double2 x, double2 y);
+double3 __ovld __cnfn powr(double3 x, double3 y);
+double4 __ovld __cnfn powr(double4 x, double4 y);
+double8 __ovld __cnfn powr(double8 x, double8 y);
+double16 __ovld __cnfn powr(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn powr(half x, half y);
+half2 __ovld __cnfn powr(half2 x, half2 y);
+half3 __ovld __cnfn powr(half3 x, half3 y);
+half4 __ovld __cnfn powr(half4 x, half4 y);
+half8 __ovld __cnfn powr(half8 x, half8 y);
+half16 __ovld __cnfn powr(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the value r such that r = x - n*y, where n
+ * is the integer nearest the exact value of x/y. If there
+ * are two integers closest to x/y, n shall be the even
+ * one. If r is zero, it is given the same sign as x.
+ */
+float __ovld __cnfn remainder(float x, float y);
+float2 __ovld __cnfn remainder(float2 x, float2 y);
+float3 __ovld __cnfn remainder(float3 x, float3 y);
+float4 __ovld __cnfn remainder(float4 x, float4 y);
+float8 __ovld __cnfn remainder(float8 x, float8 y);
+float16 __ovld __cnfn remainder(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn remainder(double x, double y);
+double2 __ovld __cnfn remainder(double2 x, double2 y);
+double3 __ovld __cnfn remainder(double3 x, double3 y);
+double4 __ovld __cnfn remainder(double4 x, double4 y);
+double8 __ovld __cnfn remainder(double8 x, double8 y);
+double16 __ovld __cnfn remainder(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn remainder(half x, half y);
+half2 __ovld __cnfn remainder(half2 x, half2 y);
+half3 __ovld __cnfn remainder(half3 x, half3 y);
+half4 __ovld __cnfn remainder(half4 x, half4 y);
+half8 __ovld __cnfn remainder(half8 x, half8 y);
+half16 __ovld __cnfn remainder(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * The remquo function computes the value r such
+ * that r = x - n*y, where n is the integer nearest the
+ * exact value of x/y. If there are two integers closest
+ * to x/y, n shall be the even one. If r is zero, it is
+ * given the same sign as x. This is the same value
+ * that is returned by the remainder function.
+ * remquo also calculates the lower seven bits of the
+ * integral quotient x/y, and gives that value the same
+ * sign as x/y. It stores this signed value in the object
+ * pointed to by quo.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld remquo(float x, float y, int *quo);
+float2 __ovld remquo(float2 x, float2 y, int2 *quo);
+float3 __ovld remquo(float3 x, float3 y, int3 *quo);
+float4 __ovld remquo(float4 x, float4 y, int4 *quo);
+float8 __ovld remquo(float8 x, float8 y, int8 *quo);
+float16 __ovld remquo(float16 x, float16 y, int16 *quo);
+#ifdef cl_khr_fp64
+double __ovld remquo(double x, double y, int *quo);
+double2 __ovld remquo(double2 x, double2 y, int2 *quo);
+double3 __ovld remquo(double3 x, double3 y, int3 *quo);
+double4 __ovld remquo(double4 x, double4 y, int4 *quo);
+double8 __ovld remquo(double8 x, double8 y, int8 *quo);
+double16 __ovld remquo(double16 x, double16 y, int16 *quo);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld remquo(half x, half y, int *quo);
+half2 __ovld remquo(half2 x, half2 y, int2 *quo);
+half3 __ovld remquo(half3 x, half3 y, int3 *quo);
+half4 __ovld remquo(half4 x, half4 y, int4 *quo);
+half8 __ovld remquo(half8 x, half8 y, int8 *quo);
+half16 __ovld remquo(half16 x, half16 y, int16 *quo);
+#endif //cl_khr_fp16
+#else
+float __ovld remquo(float x, float y, __global int *quo);
+float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
+float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
+float4 __ovld remquo(float4 x, float4 y, __global int4 *quo);
+float8 __ovld remquo(float8 x, float8 y, __global int8 *quo);
+float16 __ovld remquo(float16 x, float16 y, __global int16 *quo);
+float __ovld remquo(float x, float y, __local int *quo);
+float2 __ovld remquo(float2 x, float2 y, __local int2 *quo);
+float3 __ovld remquo(float3 x, float3 y, __local int3 *quo);
+float4 __ovld remquo(float4 x, float4 y, __local int4 *quo);
+float8 __ovld remquo(float8 x, float8 y, __local int8 *quo);
+float16 __ovld remquo(float16 x, float16 y, __local int16 *quo);
+float __ovld remquo(float x, float y, __private int *quo);
+float2 __ovld remquo(float2 x, float2 y, __private int2 *quo);
+float3 __ovld remquo(float3 x, float3 y, __private int3 *quo);
+float4 __ovld remquo(float4 x, float4 y, __private int4 *quo);
+float8 __ovld remquo(float8 x, float8 y, __private int8 *quo);
+float16 __ovld remquo(float16 x, float16 y, __private int16 *quo);
+#ifdef cl_khr_fp64
+double __ovld remquo(double x, double y, __global int *quo);
+double2 __ovld remquo(double2 x, double2 y, __global int2 *quo);
+double3 __ovld remquo(double3 x, double3 y, __global int3 *quo);
+double4 __ovld remquo(double4 x, double4 y, __global int4 *quo);
+double8 __ovld remquo(double8 x, double8 y, __global int8 *quo);
+double16 __ovld remquo(double16 x, double16 y, __global int16 *quo);
+double __ovld remquo(double x, double y, __local int *quo);
+double2 __ovld remquo(double2 x, double2 y, __local int2 *quo);
+double3 __ovld remquo(double3 x, double3 y, __local int3 *quo);
+double4 __ovld remquo(double4 x, double4 y, __local int4 *quo);
+double8 __ovld remquo(double8 x, double8 y, __local int8 *quo);
+double16 __ovld remquo(double16 x, double16 y, __local int16 *quo);
+double __ovld remquo(double x, double y, __private int *quo);
+double2 __ovld remquo(double2 x, double2 y, __private int2 *quo);
+double3 __ovld remquo(double3 x, double3 y, __private int3 *quo);
+double4 __ovld remquo(double4 x, double4 y, __private int4 *quo);
+double8 __ovld remquo(double8 x, double8 y, __private int8 *quo);
+double16 __ovld remquo(double16 x, double16 y, __private int16 *quo);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld remquo(half x, half y, __global int *quo);
+half2 __ovld remquo(half2 x, half2 y, __global int2 *quo);
+half3 __ovld remquo(half3 x, half3 y, __global int3 *quo);
+half4 __ovld remquo(half4 x, half4 y, __global int4 *quo);
+half8 __ovld remquo(half8 x, half8 y, __global int8 *quo);
+half16 __ovld remquo(half16 x, half16 y, __global int16 *quo);
+half __ovld remquo(half x, half y, __local int *quo);
+half2 __ovld remquo(half2 x, half2 y, __local int2 *quo);
+half3 __ovld remquo(half3 x, half3 y, __local int3 *quo);
+half4 __ovld remquo(half4 x, half4 y, __local int4 *quo);
+half8 __ovld remquo(half8 x, half8 y, __local int8 *quo);
+half16 __ovld remquo(half16 x, half16 y, __local int16 *quo);
+half __ovld remquo(half x, half y, __private int *quo);
+half2 __ovld remquo(half2 x, half2 y, __private int2 *quo);
+half3 __ovld remquo(half3 x, half3 y, __private int3 *quo);
+half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
+half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
+half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
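+
+/*
+ * Example (illustrative, not normative): remainder rounds the quotient to
+ * the nearest integer, whereas fmod truncates it; remquo additionally
+ * reports the low bits of that rounded quotient:
+ *
+ *   float r1 = fmod(5.5f, 2.0f);        // r1 == 1.5f   (n = trunc(2.75) = 2)
+ *   float r2 = remainder(5.5f, 2.0f);   // r2 == -0.5f  (n = 3, nearest to 2.75)
+ *   int q;
+ *   float r3 = remquo(5.5f, 2.0f, &q);  // r3 == -0.5f, q == 3
+ */
+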
+/**
+ * Round to integral value (using round to nearest
+ * even rounding mode) in floating-point format.
+ * Refer to section 7.1 for a description of rounding
+ * modes.
+ */
+float __ovld __cnfn rint(float);
+float2 __ovld __cnfn rint(float2);
+float3 __ovld __cnfn rint(float3);
+float4 __ovld __cnfn rint(float4);
+float8 __ovld __cnfn rint(float8);
+float16 __ovld __cnfn rint(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rint(double);
+double2 __ovld __cnfn rint(double2);
+double3 __ovld __cnfn rint(double3);
+double4 __ovld __cnfn rint(double4);
+double8 __ovld __cnfn rint(double8);
+double16 __ovld __cnfn rint(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rint(half);
+half2 __ovld __cnfn rint(half2);
+half3 __ovld __cnfn rint(half3);
+half4 __ovld __cnfn rint(half4);
+half8 __ovld __cnfn rint(half8);
+half16 __ovld __cnfn rint(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power 1/y.
+ */
+float __ovld __cnfn rootn(float x, int y);
+float2 __ovld __cnfn rootn(float2 x, int2 y);
+float3 __ovld __cnfn rootn(float3 x, int3 y);
+float4 __ovld __cnfn rootn(float4 x, int4 y);
+float8 __ovld __cnfn rootn(float8 x, int8 y);
+float16 __ovld __cnfn rootn(float16 x, int16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rootn(double x, int y);
+double2 __ovld __cnfn rootn(double2 x, int2 y);
+double3 __ovld __cnfn rootn(double3 x, int3 y);
+double4 __ovld __cnfn rootn(double4 x, int4 y);
+double8 __ovld __cnfn rootn(double8 x, int8 y);
+double16 __ovld __cnfn rootn(double16 x, int16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rootn(half x, int y);
+half2 __ovld __cnfn rootn(half2 x, int2 y);
+half3 __ovld __cnfn rootn(half3 x, int3 y);
+half4 __ovld __cnfn rootn(half4 x, int4 y);
+half8 __ovld __cnfn rootn(half8 x, int8 y);
+half16 __ovld __cnfn rootn(half16 x, int16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Return the integral value nearest to x rounding
+ * halfway cases away from zero, regardless of the
+ * current rounding direction.
+ */
+float __ovld __cnfn round(float x);
+float2 __ovld __cnfn round(float2 x);
+float3 __ovld __cnfn round(float3 x);
+float4 __ovld __cnfn round(float4 x);
+float8 __ovld __cnfn round(float8 x);
+float16 __ovld __cnfn round(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn round(double x);
+double2 __ovld __cnfn round(double2 x);
+double3 __ovld __cnfn round(double3 x);
+double4 __ovld __cnfn round(double4 x);
+double8 __ovld __cnfn round(double8 x);
+double16 __ovld __cnfn round(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn round(half x);
+half2 __ovld __cnfn round(half2 x);
+half3 __ovld __cnfn round(half3 x);
+half4 __ovld __cnfn round(half4 x);
+half8 __ovld __cnfn round(half8 x);
+half16 __ovld __cnfn round(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute inverse square root.
+ */
+float __ovld __cnfn rsqrt(float);
+float2 __ovld __cnfn rsqrt(float2);
+float3 __ovld __cnfn rsqrt(float3);
+float4 __ovld __cnfn rsqrt(float4);
+float8 __ovld __cnfn rsqrt(float8);
+float16 __ovld __cnfn rsqrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rsqrt(double);
+double2 __ovld __cnfn rsqrt(double2);
+double3 __ovld __cnfn rsqrt(double3);
+double4 __ovld __cnfn rsqrt(double4);
+double8 __ovld __cnfn rsqrt(double8);
+double16 __ovld __cnfn rsqrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rsqrt(half);
+half2 __ovld __cnfn rsqrt(half2);
+half3 __ovld __cnfn rsqrt(half3);
+half4 __ovld __cnfn rsqrt(half4);
+half8 __ovld __cnfn rsqrt(half8);
+half16 __ovld __cnfn rsqrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sine.
+ */
+float __ovld __cnfn sin(float);
+float2 __ovld __cnfn sin(float2);
+float3 __ovld __cnfn sin(float3);
+float4 __ovld __cnfn sin(float4);
+float8 __ovld __cnfn sin(float8);
+float16 __ovld __cnfn sin(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sin(double);
+double2 __ovld __cnfn sin(double2);
+double3 __ovld __cnfn sin(double3);
+double4 __ovld __cnfn sin(double4);
+double8 __ovld __cnfn sin(double8);
+double16 __ovld __cnfn sin(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sin(half);
+half2 __ovld __cnfn sin(half2);
+half3 __ovld __cnfn sin(half3);
+half4 __ovld __cnfn sin(half4);
+half8 __ovld __cnfn sin(half8);
+half16 __ovld __cnfn sin(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sine and cosine of x. The computed sine
+ * is the return value and the computed cosine is returned
+ * in cosval.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld sincos(float x, float *cosval);
+float2 __ovld sincos(float2 x, float2 *cosval);
+float3 __ovld sincos(float3 x, float3 *cosval);
+float4 __ovld sincos(float4 x, float4 *cosval);
+float8 __ovld sincos(float8 x, float8 *cosval);
+float16 __ovld sincos(float16 x, float16 *cosval);
+#ifdef cl_khr_fp64
+double __ovld sincos(double x, double *cosval);
+double2 __ovld sincos(double2 x, double2 *cosval);
+double3 __ovld sincos(double3 x, double3 *cosval);
+double4 __ovld sincos(double4 x, double4 *cosval);
+double8 __ovld sincos(double8 x, double8 *cosval);
+double16 __ovld sincos(double16 x, double16 *cosval);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld sincos(half x, half *cosval);
+half2 __ovld sincos(half2 x, half2 *cosval);
+half3 __ovld sincos(half3 x, half3 *cosval);
+half4 __ovld sincos(half4 x, half4 *cosval);
+half8 __ovld sincos(half8 x, half8 *cosval);
+half16 __ovld sincos(half16 x, half16 *cosval);
+#endif //cl_khr_fp16
+#else
+float __ovld sincos(float x, __global float *cosval);
+float2 __ovld sincos(float2 x, __global float2 *cosval);
+float3 __ovld sincos(float3 x, __global float3 *cosval);
+float4 __ovld sincos(float4 x, __global float4 *cosval);
+float8 __ovld sincos(float8 x, __global float8 *cosval);
+float16 __ovld sincos(float16 x, __global float16 *cosval);
+float __ovld sincos(float x, __local float *cosval);
+float2 __ovld sincos(float2 x, __local float2 *cosval);
+float3 __ovld sincos(float3 x, __local float3 *cosval);
+float4 __ovld sincos(float4 x, __local float4 *cosval);
+float8 __ovld sincos(float8 x, __local float8 *cosval);
+float16 __ovld sincos(float16 x, __local float16 *cosval);
+float __ovld sincos(float x, __private float *cosval);
+float2 __ovld sincos(float2 x, __private float2 *cosval);
+float3 __ovld sincos(float3 x, __private float3 *cosval);
+float4 __ovld sincos(float4 x, __private float4 *cosval);
+float8 __ovld sincos(float8 x, __private float8 *cosval);
+float16 __ovld sincos(float16 x, __private float16 *cosval);
+#ifdef cl_khr_fp64
+double __ovld sincos(double x, __global double *cosval);
+double2 __ovld sincos(double2 x, __global double2 *cosval);
+double3 __ovld sincos(double3 x, __global double3 *cosval);
+double4 __ovld sincos(double4 x, __global double4 *cosval);
+double8 __ovld sincos(double8 x, __global double8 *cosval);
+double16 __ovld sincos(double16 x, __global double16 *cosval);
+double __ovld sincos(double x, __local double *cosval);
+double2 __ovld sincos(double2 x, __local double2 *cosval);
+double3 __ovld sincos(double3 x, __local double3 *cosval);
+double4 __ovld sincos(double4 x, __local double4 *cosval);
+double8 __ovld sincos(double8 x, __local double8 *cosval);
+double16 __ovld sincos(double16 x, __local double16 *cosval);
+double __ovld sincos(double x, __private double *cosval);
+double2 __ovld sincos(double2 x, __private double2 *cosval);
+double3 __ovld sincos(double3 x, __private double3 *cosval);
+double4 __ovld sincos(double4 x, __private double4 *cosval);
+double8 __ovld sincos(double8 x, __private double8 *cosval);
+double16 __ovld sincos(double16 x, __private double16 *cosval);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld sincos(half x, __global half *cosval);
+half2 __ovld sincos(half2 x, __global half2 *cosval);
+half3 __ovld sincos(half3 x, __global half3 *cosval);
+half4 __ovld sincos(half4 x, __global half4 *cosval);
+half8 __ovld sincos(half8 x, __global half8 *cosval);
+half16 __ovld sincos(half16 x, __global half16 *cosval);
+half __ovld sincos(half x, __local half *cosval);
+half2 __ovld sincos(half2 x, __local half2 *cosval);
+half3 __ovld sincos(half3 x, __local half3 *cosval);
+half4 __ovld sincos(half4 x, __local half4 *cosval);
+half8 __ovld sincos(half8 x, __local half8 *cosval);
+half16 __ovld sincos(half16 x, __local half16 *cosval);
+half __ovld sincos(half x, __private half *cosval);
+half2 __ovld sincos(half2 x, __private half2 *cosval);
+half3 __ovld sincos(half3 x, __private half3 *cosval);
+half4 __ovld sincos(half4 x, __private half4 *cosval);
+half8 __ovld sincos(half8 x, __private half8 *cosval);
+half16 __ovld sincos(half16 x, __private half16 *cosval);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
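+/*
+ * Example (illustrative, not normative): sincos returns the sine and writes
+ * the cosine through the pointer, typically cheaper than separate sin and
+ * cos calls:
+ *
+ *   float c;
+ *   float s = sincos(x, &c);   // s == sin(x), c == cos(x)
+ */
+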
+/**
+ * Compute hyperbolic sine.
+ */
+float __ovld __cnfn sinh(float);
+float2 __ovld __cnfn sinh(float2);
+float3 __ovld __cnfn sinh(float3);
+float4 __ovld __cnfn sinh(float4);
+float8 __ovld __cnfn sinh(float8);
+float16 __ovld __cnfn sinh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sinh(double);
+double2 __ovld __cnfn sinh(double2);
+double3 __ovld __cnfn sinh(double3);
+double4 __ovld __cnfn sinh(double4);
+double8 __ovld __cnfn sinh(double8);
+double16 __ovld __cnfn sinh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sinh(half);
+half2 __ovld __cnfn sinh(half2);
+half3 __ovld __cnfn sinh(half3);
+half4 __ovld __cnfn sinh(half4);
+half8 __ovld __cnfn sinh(half8);
+half16 __ovld __cnfn sinh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sin (PI * x).
+ */
+float __ovld __cnfn sinpi(float x);
+float2 __ovld __cnfn sinpi(float2 x);
+float3 __ovld __cnfn sinpi(float3 x);
+float4 __ovld __cnfn sinpi(float4 x);
+float8 __ovld __cnfn sinpi(float8 x);
+float16 __ovld __cnfn sinpi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sinpi(double x);
+double2 __ovld __cnfn sinpi(double2 x);
+double3 __ovld __cnfn sinpi(double3 x);
+double4 __ovld __cnfn sinpi(double4 x);
+double8 __ovld __cnfn sinpi(double8 x);
+double16 __ovld __cnfn sinpi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sinpi(half x);
+half2 __ovld __cnfn sinpi(half2 x);
+half3 __ovld __cnfn sinpi(half3 x);
+half4 __ovld __cnfn sinpi(half4 x);
+half8 __ovld __cnfn sinpi(half8 x);
+half16 __ovld __cnfn sinpi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute square root.
+ */
+float __ovld __cnfn sqrt(float);
+float2 __ovld __cnfn sqrt(float2);
+float3 __ovld __cnfn sqrt(float3);
+float4 __ovld __cnfn sqrt(float4);
+float8 __ovld __cnfn sqrt(float8);
+float16 __ovld __cnfn sqrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sqrt(double);
+double2 __ovld __cnfn sqrt(double2);
+double3 __ovld __cnfn sqrt(double3);
+double4 __ovld __cnfn sqrt(double4);
+double8 __ovld __cnfn sqrt(double8);
+double16 __ovld __cnfn sqrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sqrt(half);
+half2 __ovld __cnfn sqrt(half2);
+half3 __ovld __cnfn sqrt(half3);
+half4 __ovld __cnfn sqrt(half4);
+half8 __ovld __cnfn sqrt(half8);
+half16 __ovld __cnfn sqrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute tangent.
+ */
+float __ovld __cnfn tan(float);
+float2 __ovld __cnfn tan(float2);
+float3 __ovld __cnfn tan(float3);
+float4 __ovld __cnfn tan(float4);
+float8 __ovld __cnfn tan(float8);
+float16 __ovld __cnfn tan(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tan(double);
+double2 __ovld __cnfn tan(double2);
+double3 __ovld __cnfn tan(double3);
+double4 __ovld __cnfn tan(double4);
+double8 __ovld __cnfn tan(double8);
+double16 __ovld __cnfn tan(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tan(half);
+half2 __ovld __cnfn tan(half2);
+half3 __ovld __cnfn tan(half3);
+half4 __ovld __cnfn tan(half4);
+half8 __ovld __cnfn tan(half8);
+half16 __ovld __cnfn tan(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute hyperbolic tangent.
+ */
+float __ovld __cnfn tanh(float);
+float2 __ovld __cnfn tanh(float2);
+float3 __ovld __cnfn tanh(float3);
+float4 __ovld __cnfn tanh(float4);
+float8 __ovld __cnfn tanh(float8);
+float16 __ovld __cnfn tanh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tanh(double);
+double2 __ovld __cnfn tanh(double2);
+double3 __ovld __cnfn tanh(double3);
+double4 __ovld __cnfn tanh(double4);
+double8 __ovld __cnfn tanh(double8);
+double16 __ovld __cnfn tanh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tanh(half);
+half2 __ovld __cnfn tanh(half2);
+half3 __ovld __cnfn tanh(half3);
+half4 __ovld __cnfn tanh(half4);
+half8 __ovld __cnfn tanh(half8);
+half16 __ovld __cnfn tanh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute tan (PI * x).
+ */
+float __ovld __cnfn tanpi(float x);
+float2 __ovld __cnfn tanpi(float2 x);
+float3 __ovld __cnfn tanpi(float3 x);
+float4 __ovld __cnfn tanpi(float4 x);
+float8 __ovld __cnfn tanpi(float8 x);
+float16 __ovld __cnfn tanpi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tanpi(double x);
+double2 __ovld __cnfn tanpi(double2 x);
+double3 __ovld __cnfn tanpi(double3 x);
+double4 __ovld __cnfn tanpi(double4 x);
+double8 __ovld __cnfn tanpi(double8 x);
+double16 __ovld __cnfn tanpi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tanpi(half x);
+half2 __ovld __cnfn tanpi(half2 x);
+half3 __ovld __cnfn tanpi(half3 x);
+half4 __ovld __cnfn tanpi(half4 x);
+half8 __ovld __cnfn tanpi(half8 x);
+half16 __ovld __cnfn tanpi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the gamma function.
+ */
+float __ovld __cnfn tgamma(float);
+float2 __ovld __cnfn tgamma(float2);
+float3 __ovld __cnfn tgamma(float3);
+float4 __ovld __cnfn tgamma(float4);
+float8 __ovld __cnfn tgamma(float8);
+float16 __ovld __cnfn tgamma(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tgamma(double);
+double2 __ovld __cnfn tgamma(double2);
+double3 __ovld __cnfn tgamma(double3);
+double4 __ovld __cnfn tgamma(double4);
+double8 __ovld __cnfn tgamma(double8);
+double16 __ovld __cnfn tgamma(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tgamma(half);
+half2 __ovld __cnfn tgamma(half2);
+half3 __ovld __cnfn tgamma(half3);
+half4 __ovld __cnfn tgamma(half4);
+half8 __ovld __cnfn tgamma(half8);
+half16 __ovld __cnfn tgamma(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to zero
+ * rounding mode.
+ */
+float __ovld __cnfn trunc(float);
+float2 __ovld __cnfn trunc(float2);
+float3 __ovld __cnfn trunc(float3);
+float4 __ovld __cnfn trunc(float4);
+float8 __ovld __cnfn trunc(float8);
+float16 __ovld __cnfn trunc(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn trunc(double);
+double2 __ovld __cnfn trunc(double2);
+double3 __ovld __cnfn trunc(double3);
+double4 __ovld __cnfn trunc(double4);
+double8 __ovld __cnfn trunc(double8);
+double16 __ovld __cnfn trunc(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn trunc(half);
+half2 __ovld __cnfn trunc(half2);
+half3 __ovld __cnfn trunc(half3);
+half4 __ovld __cnfn trunc(half4);
+half8 __ovld __cnfn trunc(half8);
+half16 __ovld __cnfn trunc(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cosine. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_cos(float x);
+float2 __ovld __cnfn half_cos(float2 x);
+float3 __ovld __cnfn half_cos(float3 x);
+float4 __ovld __cnfn half_cos(float4 x);
+float8 __ovld __cnfn half_cos(float8 x);
+float16 __ovld __cnfn half_cos(float16 x);
+
+/**
+ * Compute x / y.
+ */
+float __ovld __cnfn half_divide(float x, float y);
+float2 __ovld __cnfn half_divide(float2 x, float2 y);
+float3 __ovld __cnfn half_divide(float3 x, float3 y);
+float4 __ovld __cnfn half_divide(float4 x, float4 y);
+float8 __ovld __cnfn half_divide(float8 x, float8 y);
+float16 __ovld __cnfn half_divide(float16 x, float16 y);
+
+/**
+ * Compute the base-e exponential of x.
+ */
+float __ovld __cnfn half_exp(float x);
+float2 __ovld __cnfn half_exp(float2 x);
+float3 __ovld __cnfn half_exp(float3 x);
+float4 __ovld __cnfn half_exp(float4 x);
+float8 __ovld __cnfn half_exp(float8 x);
+float16 __ovld __cnfn half_exp(float16 x);
+
+/**
+ * Compute the base-2 exponential of x.
+ */
+float __ovld __cnfn half_exp2(float x);
+float2 __ovld __cnfn half_exp2(float2 x);
+float3 __ovld __cnfn half_exp2(float3 x);
+float4 __ovld __cnfn half_exp2(float4 x);
+float8 __ovld __cnfn half_exp2(float8 x);
+float16 __ovld __cnfn half_exp2(float16 x);
+
+/**
+ * Compute the base-10 exponential of x.
+ */
+float __ovld __cnfn half_exp10(float x);
+float2 __ovld __cnfn half_exp10(float2 x);
+float3 __ovld __cnfn half_exp10(float3 x);
+float4 __ovld __cnfn half_exp10(float4 x);
+float8 __ovld __cnfn half_exp10(float8 x);
+float16 __ovld __cnfn half_exp10(float16 x);
+
+/**
+ * Compute natural logarithm.
+ */
+float __ovld __cnfn half_log(float x);
+float2 __ovld __cnfn half_log(float2 x);
+float3 __ovld __cnfn half_log(float3 x);
+float4 __ovld __cnfn half_log(float4 x);
+float8 __ovld __cnfn half_log(float8 x);
+float16 __ovld __cnfn half_log(float16 x);
+
+/**
+ * Compute a base 2 logarithm.
+ */
+float __ovld __cnfn half_log2(float x);
+float2 __ovld __cnfn half_log2(float2 x);
+float3 __ovld __cnfn half_log2(float3 x);
+float4 __ovld __cnfn half_log2(float4 x);
+float8 __ovld __cnfn half_log2(float8 x);
+float16 __ovld __cnfn half_log2(float16 x);
+
+/**
+ * Compute a base 10 logarithm.
+ */
+float __ovld __cnfn half_log10(float x);
+float2 __ovld __cnfn half_log10(float2 x);
+float3 __ovld __cnfn half_log10(float3 x);
+float4 __ovld __cnfn half_log10(float4 x);
+float8 __ovld __cnfn half_log10(float8 x);
+float16 __ovld __cnfn half_log10(float16 x);
+
+/**
+ * Compute x to the power y, where x is >= 0.
+ */
+float __ovld __cnfn half_powr(float x, float y);
+float2 __ovld __cnfn half_powr(float2 x, float2 y);
+float3 __ovld __cnfn half_powr(float3 x, float3 y);
+float4 __ovld __cnfn half_powr(float4 x, float4 y);
+float8 __ovld __cnfn half_powr(float8 x, float8 y);
+float16 __ovld __cnfn half_powr(float16 x, float16 y);
+
+/**
+ * Compute reciprocal.
+ */
+float __ovld __cnfn half_recip(float x);
+float2 __ovld __cnfn half_recip(float2 x);
+float3 __ovld __cnfn half_recip(float3 x);
+float4 __ovld __cnfn half_recip(float4 x);
+float8 __ovld __cnfn half_recip(float8 x);
+float16 __ovld __cnfn half_recip(float16 x);
+
+/**
+ * Compute inverse square root.
+ */
+float __ovld __cnfn half_rsqrt(float x);
+float2 __ovld __cnfn half_rsqrt(float2 x);
+float3 __ovld __cnfn half_rsqrt(float3 x);
+float4 __ovld __cnfn half_rsqrt(float4 x);
+float8 __ovld __cnfn half_rsqrt(float8 x);
+float16 __ovld __cnfn half_rsqrt(float16 x);
+
+/**
+ * Compute sine. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_sin(float x);
+float2 __ovld __cnfn half_sin(float2 x);
+float3 __ovld __cnfn half_sin(float3 x);
+float4 __ovld __cnfn half_sin(float4 x);
+float8 __ovld __cnfn half_sin(float8 x);
+float16 __ovld __cnfn half_sin(float16 x);
+
+/**
+ * Compute square root.
+ */
+float __ovld __cnfn half_sqrt(float x);
+float2 __ovld __cnfn half_sqrt(float2 x);
+float3 __ovld __cnfn half_sqrt(float3 x);
+float4 __ovld __cnfn half_sqrt(float4 x);
+float8 __ovld __cnfn half_sqrt(float8 x);
+float16 __ovld __cnfn half_sqrt(float16 x);
+
+/**
+ * Compute tangent. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_tan(float x);
+float2 __ovld __cnfn half_tan(float2 x);
+float3 __ovld __cnfn half_tan(float3 x);
+float4 __ovld __cnfn half_tan(float4 x);
+float8 __ovld __cnfn half_tan(float8 x);
+float16 __ovld __cnfn half_tan(float16 x);
+
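+/*
+ * Illustrative sketch (not part of the upstream header): the half_ variants
+ * above trade accuracy, and for the trigonometric ones input range, for
+ * speed. The kernel name and buffer layout below are hypothetical; it
+ * normalizes float2 vectors with half_rsqrt instead of rsqrt.
+ *
+ *   __kernel void fast_normalize2(__global float2 *v) {
+ *     size_t i = get_global_id(0);
+ *     float2 p = v[i];
+ *     // half_rsqrt: reduced-accuracy 1/sqrt, adequate for many graphics uses
+ *     v[i] = p * half_rsqrt(p.x * p.x + p.y * p.y);
+ *   }
+ */
+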
+/**
+ * Compute cosine over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_cos(float x);
+float2 __ovld __cnfn native_cos(float2 x);
+float3 __ovld __cnfn native_cos(float3 x);
+float4 __ovld __cnfn native_cos(float4 x);
+float8 __ovld __cnfn native_cos(float8 x);
+float16 __ovld __cnfn native_cos(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_cos(double x);
+double2 __ovld __cnfn native_cos(double2 x);
+double3 __ovld __cnfn native_cos(double3 x);
+double4 __ovld __cnfn native_cos(double4 x);
+double8 __ovld __cnfn native_cos(double8 x);
+double16 __ovld __cnfn native_cos(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute x / y over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_divide(float x, float y);
+float2 __ovld __cnfn native_divide(float2 x, float2 y);
+float3 __ovld __cnfn native_divide(float3 x, float3 y);
+float4 __ovld __cnfn native_divide(float4 x, float4 y);
+float8 __ovld __cnfn native_divide(float8 x, float8 y);
+float16 __ovld __cnfn native_divide(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_divide(double x, double y);
+double2 __ovld __cnfn native_divide(double2 x, double2 y);
+double3 __ovld __cnfn native_divide(double3 x, double3 y);
+double4 __ovld __cnfn native_divide(double4 x, double4 y);
+double8 __ovld __cnfn native_divide(double8 x, double8 y);
+double16 __ovld __cnfn native_divide(double16 x, double16 y);
+#endif //cl_khr_fp64
+
+/**
+ * Compute the base-e exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp(float x);
+float2 __ovld __cnfn native_exp(float2 x);
+float3 __ovld __cnfn native_exp(float3 x);
+float4 __ovld __cnfn native_exp(float4 x);
+float8 __ovld __cnfn native_exp(float8 x);
+float16 __ovld __cnfn native_exp(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_exp(double x);
+double2 __ovld __cnfn native_exp(double2 x);
+double3 __ovld __cnfn native_exp(double3 x);
+double4 __ovld __cnfn native_exp(double4 x);
+double8 __ovld __cnfn native_exp(double8 x);
+double16 __ovld __cnfn native_exp(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute the base-2 exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp2(float x);
+float2 __ovld __cnfn native_exp2(float2 x);
+float3 __ovld __cnfn native_exp2(float3 x);
+float4 __ovld __cnfn native_exp2(float4 x);
+float8 __ovld __cnfn native_exp2(float8 x);
+float16 __ovld __cnfn native_exp2(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_exp2(double x);
+double2 __ovld __cnfn native_exp2(double2 x);
+double3 __ovld __cnfn native_exp2(double3 x);
+double4 __ovld __cnfn native_exp2(double4 x);
+double8 __ovld __cnfn native_exp2(double8 x);
+double16 __ovld __cnfn native_exp2(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute the base-10 exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp10(float x);
+float2 __ovld __cnfn native_exp10(float2 x);
+float3 __ovld __cnfn native_exp10(float3 x);
+float4 __ovld __cnfn native_exp10(float4 x);
+float8 __ovld __cnfn native_exp10(float8 x);
+float16 __ovld __cnfn native_exp10(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_exp10(double x);
+double2 __ovld __cnfn native_exp10(double2 x);
+double3 __ovld __cnfn native_exp10(double3 x);
+double4 __ovld __cnfn native_exp10(double4 x);
+double8 __ovld __cnfn native_exp10(double8 x);
+double16 __ovld __cnfn native_exp10(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute natural logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log(float x);
+float2 __ovld __cnfn native_log(float2 x);
+float3 __ovld __cnfn native_log(float3 x);
+float4 __ovld __cnfn native_log(float4 x);
+float8 __ovld __cnfn native_log(float8 x);
+float16 __ovld __cnfn native_log(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_log(double x);
+double2 __ovld __cnfn native_log(double2 x);
+double3 __ovld __cnfn native_log(double3 x);
+double4 __ovld __cnfn native_log(double4 x);
+double8 __ovld __cnfn native_log(double8 x);
+double16 __ovld __cnfn native_log(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute a base 2 logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log2(float x);
+float2 __ovld __cnfn native_log2(float2 x);
+float3 __ovld __cnfn native_log2(float3 x);
+float4 __ovld __cnfn native_log2(float4 x);
+float8 __ovld __cnfn native_log2(float8 x);
+float16 __ovld __cnfn native_log2(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_log2(double x);
+double2 __ovld __cnfn native_log2(double2 x);
+double3 __ovld __cnfn native_log2(double3 x);
+double4 __ovld __cnfn native_log2(double4 x);
+double8 __ovld __cnfn native_log2(double8 x);
+double16 __ovld __cnfn native_log2(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute a base 10 logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log10(float x);
+float2 __ovld __cnfn native_log10(float2 x);
+float3 __ovld __cnfn native_log10(float3 x);
+float4 __ovld __cnfn native_log10(float4 x);
+float8 __ovld __cnfn native_log10(float8 x);
+float16 __ovld __cnfn native_log10(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_log10(double x);
+double2 __ovld __cnfn native_log10(double2 x);
+double3 __ovld __cnfn native_log10(double3 x);
+double4 __ovld __cnfn native_log10(double4 x);
+double8 __ovld __cnfn native_log10(double8 x);
+double16 __ovld __cnfn native_log10(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute x to the power y, where x is >= 0. The ranges of
+ * x and y are implementation-defined. The maximum error
+ * is implementation-defined.
+ */
+float __ovld __cnfn native_powr(float x, float y);
+float2 __ovld __cnfn native_powr(float2 x, float2 y);
+float3 __ovld __cnfn native_powr(float3 x, float3 y);
+float4 __ovld __cnfn native_powr(float4 x, float4 y);
+float8 __ovld __cnfn native_powr(float8 x, float8 y);
+float16 __ovld __cnfn native_powr(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_powr(double x, double y);
+double2 __ovld __cnfn native_powr(double2 x, double2 y);
+double3 __ovld __cnfn native_powr(double3 x, double3 y);
+double4 __ovld __cnfn native_powr(double4 x, double4 y);
+double8 __ovld __cnfn native_powr(double8 x, double8 y);
+double16 __ovld __cnfn native_powr(double16 x, double16 y);
+#endif //cl_khr_fp64
+
+/**
+ * Compute reciprocal over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_recip(float x);
+float2 __ovld __cnfn native_recip(float2 x);
+float3 __ovld __cnfn native_recip(float3 x);
+float4 __ovld __cnfn native_recip(float4 x);
+float8 __ovld __cnfn native_recip(float8 x);
+float16 __ovld __cnfn native_recip(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_recip(double x);
+double2 __ovld __cnfn native_recip(double2 x);
+double3 __ovld __cnfn native_recip(double3 x);
+double4 __ovld __cnfn native_recip(double4 x);
+double8 __ovld __cnfn native_recip(double8 x);
+double16 __ovld __cnfn native_recip(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute inverse square root over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_rsqrt(float x);
+float2 __ovld __cnfn native_rsqrt(float2 x);
+float3 __ovld __cnfn native_rsqrt(float3 x);
+float4 __ovld __cnfn native_rsqrt(float4 x);
+float8 __ovld __cnfn native_rsqrt(float8 x);
+float16 __ovld __cnfn native_rsqrt(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_rsqrt(double x);
+double2 __ovld __cnfn native_rsqrt(double2 x);
+double3 __ovld __cnfn native_rsqrt(double3 x);
+double4 __ovld __cnfn native_rsqrt(double4 x);
+double8 __ovld __cnfn native_rsqrt(double8 x);
+double16 __ovld __cnfn native_rsqrt(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute sine over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_sin(float x);
+float2 __ovld __cnfn native_sin(float2 x);
+float3 __ovld __cnfn native_sin(float3 x);
+float4 __ovld __cnfn native_sin(float4 x);
+float8 __ovld __cnfn native_sin(float8 x);
+float16 __ovld __cnfn native_sin(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_sin(double x);
+double2 __ovld __cnfn native_sin(double2 x);
+double3 __ovld __cnfn native_sin(double3 x);
+double4 __ovld __cnfn native_sin(double4 x);
+double8 __ovld __cnfn native_sin(double8 x);
+double16 __ovld __cnfn native_sin(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute square root over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_sqrt(float x);
+float2 __ovld __cnfn native_sqrt(float2 x);
+float3 __ovld __cnfn native_sqrt(float3 x);
+float4 __ovld __cnfn native_sqrt(float4 x);
+float8 __ovld __cnfn native_sqrt(float8 x);
+float16 __ovld __cnfn native_sqrt(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_sqrt(double x);
+double2 __ovld __cnfn native_sqrt(double2 x);
+double3 __ovld __cnfn native_sqrt(double3 x);
+double4 __ovld __cnfn native_sqrt(double4 x);
+double8 __ovld __cnfn native_sqrt(double8 x);
+double16 __ovld __cnfn native_sqrt(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute tangent over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_tan(float x);
+float2 __ovld __cnfn native_tan(float2 x);
+float3 __ovld __cnfn native_tan(float3 x);
+float4 __ovld __cnfn native_tan(float4 x);
+float8 __ovld __cnfn native_tan(float8 x);
+float16 __ovld __cnfn native_tan(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_tan(double x);
+double2 __ovld __cnfn native_tan(double2 x);
+double3 __ovld __cnfn native_tan(double3 x);
+double4 __ovld __cnfn native_tan(double4 x);
+double8 __ovld __cnfn native_tan(double8 x);
+double16 __ovld __cnfn native_tan(double16 x);
+#endif //cl_khr_fp64
+
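+/*
+ * Illustrative sketch (not part of the upstream header): the native_ variants
+ * above map to whatever the device does fastest, with implementation-defined
+ * range and accuracy, so they suit throughput-oriented code that tolerates
+ * error. The kernel name and data layout below are hypothetical.
+ *
+ *   __kernel void fast_logistic(__global const float *x, __global float *y) {
+ *     size_t i = get_global_id(0);
+ *     // native_exp/native_recip may be much faster than exp()/division,
+ *     // at unspecified accuracy
+ *     y[i] = native_recip(1.0f + native_exp(-x[i]));
+ *   }
+ */
+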
+// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
+
+/**
+ * Returns | x |.
+ */
+uchar __ovld __cnfn abs(char x);
+uchar __ovld __cnfn abs(uchar x);
+uchar2 __ovld __cnfn abs(char2 x);
+uchar2 __ovld __cnfn abs(uchar2 x);
+uchar3 __ovld __cnfn abs(char3 x);
+uchar3 __ovld __cnfn abs(uchar3 x);
+uchar4 __ovld __cnfn abs(char4 x);
+uchar4 __ovld __cnfn abs(uchar4 x);
+uchar8 __ovld __cnfn abs(char8 x);
+uchar8 __ovld __cnfn abs(uchar8 x);
+uchar16 __ovld __cnfn abs(char16 x);
+uchar16 __ovld __cnfn abs(uchar16 x);
+ushort __ovld __cnfn abs(short x);
+ushort __ovld __cnfn abs(ushort x);
+ushort2 __ovld __cnfn abs(short2 x);
+ushort2 __ovld __cnfn abs(ushort2 x);
+ushort3 __ovld __cnfn abs(short3 x);
+ushort3 __ovld __cnfn abs(ushort3 x);
+ushort4 __ovld __cnfn abs(short4 x);
+ushort4 __ovld __cnfn abs(ushort4 x);
+ushort8 __ovld __cnfn abs(short8 x);
+ushort8 __ovld __cnfn abs(ushort8 x);
+ushort16 __ovld __cnfn abs(short16 x);
+ushort16 __ovld __cnfn abs(ushort16 x);
+uint __ovld __cnfn abs(int x);
+uint __ovld __cnfn abs(uint x);
+uint2 __ovld __cnfn abs(int2 x);
+uint2 __ovld __cnfn abs(uint2 x);
+uint3 __ovld __cnfn abs(int3 x);
+uint3 __ovld __cnfn abs(uint3 x);
+uint4 __ovld __cnfn abs(int4 x);
+uint4 __ovld __cnfn abs(uint4 x);
+uint8 __ovld __cnfn abs(int8 x);
+uint8 __ovld __cnfn abs(uint8 x);
+uint16 __ovld __cnfn abs(int16 x);
+uint16 __ovld __cnfn abs(uint16 x);
+ulong __ovld __cnfn abs(long x);
+ulong __ovld __cnfn abs(ulong x);
+ulong2 __ovld __cnfn abs(long2 x);
+ulong2 __ovld __cnfn abs(ulong2 x);
+ulong3 __ovld __cnfn abs(long3 x);
+ulong3 __ovld __cnfn abs(ulong3 x);
+ulong4 __ovld __cnfn abs(long4 x);
+ulong4 __ovld __cnfn abs(ulong4 x);
+ulong8 __ovld __cnfn abs(long8 x);
+ulong8 __ovld __cnfn abs(ulong8 x);
+ulong16 __ovld __cnfn abs(long16 x);
+ulong16 __ovld __cnfn abs(ulong16 x);
+
+/**
+ * Returns | x - y | without modulo overflow.
+ */
+uchar __ovld __cnfn abs_diff(char x, char y);
+uchar __ovld __cnfn abs_diff(uchar x, uchar y);
+uchar2 __ovld __cnfn abs_diff(char2 x, char2 y);
+uchar2 __ovld __cnfn abs_diff(uchar2 x, uchar2 y);
+uchar3 __ovld __cnfn abs_diff(char3 x, char3 y);
+uchar3 __ovld __cnfn abs_diff(uchar3 x, uchar3 y);
+uchar4 __ovld __cnfn abs_diff(char4 x, char4 y);
+uchar4 __ovld __cnfn abs_diff(uchar4 x, uchar4 y);
+uchar8 __ovld __cnfn abs_diff(char8 x, char8 y);
+uchar8 __ovld __cnfn abs_diff(uchar8 x, uchar8 y);
+uchar16 __ovld __cnfn abs_diff(char16 x, char16 y);
+uchar16 __ovld __cnfn abs_diff(uchar16 x, uchar16 y);
+ushort __ovld __cnfn abs_diff(short x, short y);
+ushort __ovld __cnfn abs_diff(ushort x, ushort y);
+ushort2 __ovld __cnfn abs_diff(short2 x, short2 y);
+ushort2 __ovld __cnfn abs_diff(ushort2 x, ushort2 y);
+ushort3 __ovld __cnfn abs_diff(short3 x, short3 y);
+ushort3 __ovld __cnfn abs_diff(ushort3 x, ushort3 y);
+ushort4 __ovld __cnfn abs_diff(short4 x, short4 y);
+ushort4 __ovld __cnfn abs_diff(ushort4 x, ushort4 y);
+ushort8 __ovld __cnfn abs_diff(short8 x, short8 y);
+ushort8 __ovld __cnfn abs_diff(ushort8 x, ushort8 y);
+ushort16 __ovld __cnfn abs_diff(short16 x, short16 y);
+ushort16 __ovld __cnfn abs_diff(ushort16 x, ushort16 y);
+uint __ovld __cnfn abs_diff(int x, int y);
+uint __ovld __cnfn abs_diff(uint x, uint y);
+uint2 __ovld __cnfn abs_diff(int2 x, int2 y);
+uint2 __ovld __cnfn abs_diff(uint2 x, uint2 y);
+uint3 __ovld __cnfn abs_diff(int3 x, int3 y);
+uint3 __ovld __cnfn abs_diff(uint3 x, uint3 y);
+uint4 __ovld __cnfn abs_diff(int4 x, int4 y);
+uint4 __ovld __cnfn abs_diff(uint4 x, uint4 y);
+uint8 __ovld __cnfn abs_diff(int8 x, int8 y);
+uint8 __ovld __cnfn abs_diff(uint8 x, uint8 y);
+uint16 __ovld __cnfn abs_diff(int16 x, int16 y);
+uint16 __ovld __cnfn abs_diff(uint16 x, uint16 y);
+ulong __ovld __cnfn abs_diff(long x, long y);
+ulong __ovld __cnfn abs_diff(ulong x, ulong y);
+ulong2 __ovld __cnfn abs_diff(long2 x, long2 y);
+ulong2 __ovld __cnfn abs_diff(ulong2 x, ulong2 y);
+ulong3 __ovld __cnfn abs_diff(long3 x, long3 y);
+ulong3 __ovld __cnfn abs_diff(ulong3 x, ulong3 y);
+ulong4 __ovld __cnfn abs_diff(long4 x, long4 y);
+ulong4 __ovld __cnfn abs_diff(ulong4 x, ulong4 y);
+ulong8 __ovld __cnfn abs_diff(long8 x, long8 y);
+ulong8 __ovld __cnfn abs_diff(ulong8 x, ulong8 y);
+ulong16 __ovld __cnfn abs_diff(long16 x, long16 y);
+ulong16 __ovld __cnfn abs_diff(ulong16 x, ulong16 y);
+
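+/*
+ * Illustrative sketch (not part of the upstream header): abs_diff returns
+ * |x - y| in the matching unsigned type without the wrap-around a plain
+ * subtraction would suffer. Hypothetical kernel computing a per-pixel sum
+ * of absolute differences over uchar4 pixels:
+ *
+ *   __kernel void sad(__global const uchar4 *a, __global const uchar4 *b,
+ *                     __global uint *out) {
+ *     size_t i = get_global_id(0);
+ *     uchar4 d = abs_diff(a[i], b[i]);        // |a - b|, no modulo overflow
+ *     out[i] = (uint)d.x + d.y + d.z + d.w;   // small enough to sum in uint
+ *   }
+ */
+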
+/**
+ * Returns x + y and saturates the result.
+ */
+char __ovld __cnfn add_sat(char x, char y);
+uchar __ovld __cnfn add_sat(uchar x, uchar y);
+char2 __ovld __cnfn add_sat(char2 x, char2 y);
+uchar2 __ovld __cnfn add_sat(uchar2 x, uchar2 y);
+char3 __ovld __cnfn add_sat(char3 x, char3 y);
+uchar3 __ovld __cnfn add_sat(uchar3 x, uchar3 y);
+char4 __ovld __cnfn add_sat(char4 x, char4 y);
+uchar4 __ovld __cnfn add_sat(uchar4 x, uchar4 y);
+char8 __ovld __cnfn add_sat(char8 x, char8 y);
+uchar8 __ovld __cnfn add_sat(uchar8 x, uchar8 y);
+char16 __ovld __cnfn add_sat(char16 x, char16 y);
+uchar16 __ovld __cnfn add_sat(uchar16 x, uchar16 y);
+short __ovld __cnfn add_sat(short x, short y);
+ushort __ovld __cnfn add_sat(ushort x, ushort y);
+short2 __ovld __cnfn add_sat(short2 x, short2 y);
+ushort2 __ovld __cnfn add_sat(ushort2 x, ushort2 y);
+short3 __ovld __cnfn add_sat(short3 x, short3 y);
+ushort3 __ovld __cnfn add_sat(ushort3 x, ushort3 y);
+short4 __ovld __cnfn add_sat(short4 x, short4 y);
+ushort4 __ovld __cnfn add_sat(ushort4 x, ushort4 y);
+short8 __ovld __cnfn add_sat(short8 x, short8 y);
+ushort8 __ovld __cnfn add_sat(ushort8 x, ushort8 y);
+short16 __ovld __cnfn add_sat(short16 x, short16 y);
+ushort16 __ovld __cnfn add_sat(ushort16 x, ushort16 y);
+int __ovld __cnfn add_sat(int x, int y);
+uint __ovld __cnfn add_sat(uint x, uint y);
+int2 __ovld __cnfn add_sat(int2 x, int2 y);
+uint2 __ovld __cnfn add_sat(uint2 x, uint2 y);
+int3 __ovld __cnfn add_sat(int3 x, int3 y);
+uint3 __ovld __cnfn add_sat(uint3 x, uint3 y);
+int4 __ovld __cnfn add_sat(int4 x, int4 y);
+uint4 __ovld __cnfn add_sat(uint4 x, uint4 y);
+int8 __ovld __cnfn add_sat(int8 x, int8 y);
+uint8 __ovld __cnfn add_sat(uint8 x, uint8 y);
+int16 __ovld __cnfn add_sat(int16 x, int16 y);
+uint16 __ovld __cnfn add_sat(uint16 x, uint16 y);
+long __ovld __cnfn add_sat(long x, long y);
+ulong __ovld __cnfn add_sat(ulong x, ulong y);
+long2 __ovld __cnfn add_sat(long2 x, long2 y);
+ulong2 __ovld __cnfn add_sat(ulong2 x, ulong2 y);
+long3 __ovld __cnfn add_sat(long3 x, long3 y);
+ulong3 __ovld __cnfn add_sat(ulong3 x, ulong3 y);
+long4 __ovld __cnfn add_sat(long4 x, long4 y);
+ulong4 __ovld __cnfn add_sat(ulong4 x, ulong4 y);
+long8 __ovld __cnfn add_sat(long8 x, long8 y);
+ulong8 __ovld __cnfn add_sat(ulong8 x, ulong8 y);
+long16 __ovld __cnfn add_sat(long16 x, long16 y);
+ulong16 __ovld __cnfn add_sat(ulong16 x, ulong16 y);
+
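+/*
+ * Illustrative sketch (not part of the upstream header): add_sat clamps the
+ * sum to the representable range instead of wrapping, which is usually what
+ * is wanted when brightening 8-bit image data. Hypothetical kernel:
+ *
+ *   __kernel void brighten(__global uchar4 *pix, uchar amount) {
+ *     size_t i = get_global_id(0);
+ *     // e.g. 250 + 20 saturates to 255 rather than wrapping to 14
+ *     pix[i] = add_sat(pix[i], (uchar4)(amount));
+ *   }
+ */
+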
+/**
+ * Returns (x + y) >> 1. The intermediate sum does
+ * not modulo overflow.
+ */
+char __ovld __cnfn hadd(char x, char y);
+uchar __ovld __cnfn hadd(uchar x, uchar y);
+char2 __ovld __cnfn hadd(char2 x, char2 y);
+uchar2 __ovld __cnfn hadd(uchar2 x, uchar2 y);
+char3 __ovld __cnfn hadd(char3 x, char3 y);
+uchar3 __ovld __cnfn hadd(uchar3 x, uchar3 y);
+char4 __ovld __cnfn hadd(char4 x, char4 y);
+uchar4 __ovld __cnfn hadd(uchar4 x, uchar4 y);
+char8 __ovld __cnfn hadd(char8 x, char8 y);
+uchar8 __ovld __cnfn hadd(uchar8 x, uchar8 y);
+char16 __ovld __cnfn hadd(char16 x, char16 y);
+uchar16 __ovld __cnfn hadd(uchar16 x, uchar16 y);
+short __ovld __cnfn hadd(short x, short y);
+ushort __ovld __cnfn hadd(ushort x, ushort y);
+short2 __ovld __cnfn hadd(short2 x, short2 y);
+ushort2 __ovld __cnfn hadd(ushort2 x, ushort2 y);
+short3 __ovld __cnfn hadd(short3 x, short3 y);
+ushort3 __ovld __cnfn hadd(ushort3 x, ushort3 y);
+short4 __ovld __cnfn hadd(short4 x, short4 y);
+ushort4 __ovld __cnfn hadd(ushort4 x, ushort4 y);
+short8 __ovld __cnfn hadd(short8 x, short8 y);
+ushort8 __ovld __cnfn hadd(ushort8 x, ushort8 y);
+short16 __ovld __cnfn hadd(short16 x, short16 y);
+ushort16 __ovld __cnfn hadd(ushort16 x, ushort16 y);
+int __ovld __cnfn hadd(int x, int y);
+uint __ovld __cnfn hadd(uint x, uint y);
+int2 __ovld __cnfn hadd(int2 x, int2 y);
+uint2 __ovld __cnfn hadd(uint2 x, uint2 y);
+int3 __ovld __cnfn hadd(int3 x, int3 y);
+uint3 __ovld __cnfn hadd(uint3 x, uint3 y);
+int4 __ovld __cnfn hadd(int4 x, int4 y);
+uint4 __ovld __cnfn hadd(uint4 x, uint4 y);
+int8 __ovld __cnfn hadd(int8 x, int8 y);
+uint8 __ovld __cnfn hadd(uint8 x, uint8 y);
+int16 __ovld __cnfn hadd(int16 x, int16 y);
+uint16 __ovld __cnfn hadd(uint16 x, uint16 y);
+long __ovld __cnfn hadd(long x, long y);
+ulong __ovld __cnfn hadd(ulong x, ulong y);
+long2 __ovld __cnfn hadd(long2 x, long2 y);
+ulong2 __ovld __cnfn hadd(ulong2 x, ulong2 y);
+long3 __ovld __cnfn hadd(long3 x, long3 y);
+ulong3 __ovld __cnfn hadd(ulong3 x, ulong3 y);
+long4 __ovld __cnfn hadd(long4 x, long4 y);
+ulong4 __ovld __cnfn hadd(ulong4 x, ulong4 y);
+long8 __ovld __cnfn hadd(long8 x, long8 y);
+ulong8 __ovld __cnfn hadd(ulong8 x, ulong8 y);
+long16 __ovld __cnfn hadd(long16 x, long16 y);
+ulong16 __ovld __cnfn hadd(ulong16 x, ulong16 y);
+
+/**
+ * Returns (x + y + 1) >> 1. The intermediate sum
+ * does not modulo overflow.
+ */
+char __ovld __cnfn rhadd(char x, char y);
+uchar __ovld __cnfn rhadd(uchar x, uchar y);
+char2 __ovld __cnfn rhadd(char2 x, char2 y);
+uchar2 __ovld __cnfn rhadd(uchar2 x, uchar2 y);
+char3 __ovld __cnfn rhadd(char3 x, char3 y);
+uchar3 __ovld __cnfn rhadd(uchar3 x, uchar3 y);
+char4 __ovld __cnfn rhadd(char4 x, char4 y);
+uchar4 __ovld __cnfn rhadd(uchar4 x, uchar4 y);
+char8 __ovld __cnfn rhadd(char8 x, char8 y);
+uchar8 __ovld __cnfn rhadd(uchar8 x, uchar8 y);
+char16 __ovld __cnfn rhadd(char16 x, char16 y);
+uchar16 __ovld __cnfn rhadd(uchar16 x, uchar16 y);
+short __ovld __cnfn rhadd(short x, short y);
+ushort __ovld __cnfn rhadd(ushort x, ushort y);
+short2 __ovld __cnfn rhadd(short2 x, short2 y);
+ushort2 __ovld __cnfn rhadd(ushort2 x, ushort2 y);
+short3 __ovld __cnfn rhadd(short3 x, short3 y);
+ushort3 __ovld __cnfn rhadd(ushort3 x, ushort3 y);
+short4 __ovld __cnfn rhadd(short4 x, short4 y);
+ushort4 __ovld __cnfn rhadd(ushort4 x, ushort4 y);
+short8 __ovld __cnfn rhadd(short8 x, short8 y);
+ushort8 __ovld __cnfn rhadd(ushort8 x, ushort8 y);
+short16 __ovld __cnfn rhadd(short16 x, short16 y);
+ushort16 __ovld __cnfn rhadd(ushort16 x, ushort16 y);
+int __ovld __cnfn rhadd(int x, int y);
+uint __ovld __cnfn rhadd(uint x, uint y);
+int2 __ovld __cnfn rhadd(int2 x, int2 y);
+uint2 __ovld __cnfn rhadd(uint2 x, uint2 y);
+int3 __ovld __cnfn rhadd(int3 x, int3 y);
+uint3 __ovld __cnfn rhadd(uint3 x, uint3 y);
+int4 __ovld __cnfn rhadd(int4 x, int4 y);
+uint4 __ovld __cnfn rhadd(uint4 x, uint4 y);
+int8 __ovld __cnfn rhadd(int8 x, int8 y);
+uint8 __ovld __cnfn rhadd(uint8 x, uint8 y);
+int16 __ovld __cnfn rhadd(int16 x, int16 y);
+uint16 __ovld __cnfn rhadd(uint16 x, uint16 y);
+long __ovld __cnfn rhadd(long x, long y);
+ulong __ovld __cnfn rhadd(ulong x, ulong y);
+long2 __ovld __cnfn rhadd(long2 x, long2 y);
+ulong2 __ovld __cnfn rhadd(ulong2 x, ulong2 y);
+long3 __ovld __cnfn rhadd(long3 x, long3 y);
+ulong3 __ovld __cnfn rhadd(ulong3 x, ulong3 y);
+long4 __ovld __cnfn rhadd(long4 x, long4 y);
+ulong4 __ovld __cnfn rhadd(ulong4 x, ulong4 y);
+long8 __ovld __cnfn rhadd(long8 x, long8 y);
+ulong8 __ovld __cnfn rhadd(ulong8 x, ulong8 y);
+long16 __ovld __cnfn rhadd(long16 x, long16 y);
+ulong16 __ovld __cnfn rhadd(ulong16 x, ulong16 y);
+
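+/*
+ * Illustrative sketch (not part of the upstream header): hadd and rhadd
+ * average two integers as if the intermediate sum had an extra bit, so
+ * blending two uchar images cannot overflow; rhadd rounds the result up.
+ * Hypothetical kernel:
+ *
+ *   __kernel void blend50(__global const uchar4 *a, __global const uchar4 *b,
+ *                         __global uchar4 *out) {
+ *     size_t i = get_global_id(0);
+ *     out[i] = rhadd(a[i], b[i]);   // (a + b + 1) >> 1 without overflow
+ *   }
+ */
+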
+/**
+ * Returns min(max(x, minval), maxval).
+ * Results are undefined if minval > maxval.
+ */
+char __ovld __cnfn clamp(char x, char minval, char maxval);
+uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
+char2 __ovld __cnfn clamp(char2 x, char2 minval, char2 maxval);
+uchar2 __ovld __cnfn clamp(uchar2 x, uchar2 minval, uchar2 maxval);
+char3 __ovld __cnfn clamp(char3 x, char3 minval, char3 maxval);
+uchar3 __ovld __cnfn clamp(uchar3 x, uchar3 minval, uchar3 maxval);
+char4 __ovld __cnfn clamp(char4 x, char4 minval, char4 maxval);
+uchar4 __ovld __cnfn clamp(uchar4 x, uchar4 minval, uchar4 maxval);
+char8 __ovld __cnfn clamp(char8 x, char8 minval, char8 maxval);
+uchar8 __ovld __cnfn clamp(uchar8 x, uchar8 minval, uchar8 maxval);
+char16 __ovld __cnfn clamp(char16 x, char16 minval, char16 maxval);
+uchar16 __ovld __cnfn clamp(uchar16 x, uchar16 minval, uchar16 maxval);
+short __ovld __cnfn clamp(short x, short minval, short maxval);
+ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
+short2 __ovld __cnfn clamp(short2 x, short2 minval, short2 maxval);
+ushort2 __ovld __cnfn clamp(ushort2 x, ushort2 minval, ushort2 maxval);
+short3 __ovld __cnfn clamp(short3 x, short3 minval, short3 maxval);
+ushort3 __ovld __cnfn clamp(ushort3 x, ushort3 minval, ushort3 maxval);
+short4 __ovld __cnfn clamp(short4 x, short4 minval, short4 maxval);
+ushort4 __ovld __cnfn clamp(ushort4 x, ushort4 minval, ushort4 maxval);
+short8 __ovld __cnfn clamp(short8 x, short8 minval, short8 maxval);
+ushort8 __ovld __cnfn clamp(ushort8 x, ushort8 minval, ushort8 maxval);
+short16 __ovld __cnfn clamp(short16 x, short16 minval, short16 maxval);
+ushort16 __ovld __cnfn clamp(ushort16 x, ushort16 minval, ushort16 maxval);
+int __ovld __cnfn clamp(int x, int minval, int maxval);
+uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
+int2 __ovld __cnfn clamp(int2 x, int2 minval, int2 maxval);
+uint2 __ovld __cnfn clamp(uint2 x, uint2 minval, uint2 maxval);
+int3 __ovld __cnfn clamp(int3 x, int3 minval, int3 maxval);
+uint3 __ovld __cnfn clamp(uint3 x, uint3 minval, uint3 maxval);
+int4 __ovld __cnfn clamp(int4 x, int4 minval, int4 maxval);
+uint4 __ovld __cnfn clamp(uint4 x, uint4 minval, uint4 maxval);
+int8 __ovld __cnfn clamp(int8 x, int8 minval, int8 maxval);
+uint8 __ovld __cnfn clamp(uint8 x, uint8 minval, uint8 maxval);
+int16 __ovld __cnfn clamp(int16 x, int16 minval, int16 maxval);
+uint16 __ovld __cnfn clamp(uint16 x, uint16 minval, uint16 maxval);
+long __ovld __cnfn clamp(long x, long minval, long maxval);
+ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
+long2 __ovld __cnfn clamp(long2 x, long2 minval, long2 maxval);
+ulong2 __ovld __cnfn clamp(ulong2 x, ulong2 minval, ulong2 maxval);
+long3 __ovld __cnfn clamp(long3 x, long3 minval, long3 maxval);
+ulong3 __ovld __cnfn clamp(ulong3 x, ulong3 minval, ulong3 maxval);
+long4 __ovld __cnfn clamp(long4 x, long4 minval, long4 maxval);
+ulong4 __ovld __cnfn clamp(ulong4 x, ulong4 minval, ulong4 maxval);
+long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval);
+ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval);
+long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval);
+ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval);
+char __ovld __cnfn clamp(char x, char minval, char maxval);
+uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
+char2 __ovld __cnfn clamp(char2 x, char minval, char maxval);
+uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval);
+char3 __ovld __cnfn clamp(char3 x, char minval, char maxval);
+uchar3 __ovld __cnfn clamp(uchar3 x, uchar minval, uchar maxval);
+char4 __ovld __cnfn clamp(char4 x, char minval, char maxval);
+uchar4 __ovld __cnfn clamp(uchar4 x, uchar minval, uchar maxval);
+char8 __ovld __cnfn clamp(char8 x, char minval, char maxval);
+uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval);
+char16 __ovld __cnfn clamp(char16 x, char minval, char maxval);
+uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval);
+short __ovld __cnfn clamp(short x, short minval, short maxval);
+ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
+short2 __ovld __cnfn clamp(short2 x, short minval, short maxval);
+ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval);
+short3 __ovld __cnfn clamp(short3 x, short minval, short maxval);
+ushort3 __ovld __cnfn clamp(ushort3 x, ushort minval, ushort maxval);
+short4 __ovld __cnfn clamp(short4 x, short minval, short maxval);
+ushort4 __ovld __cnfn clamp(ushort4 x, ushort minval, ushort maxval);
+short8 __ovld __cnfn clamp(short8 x, short minval, short maxval);
+ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval);
+short16 __ovld __cnfn clamp(short16 x, short minval, short maxval);
+ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval);
+int __ovld __cnfn clamp(int x, int minval, int maxval);
+uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
+int2 __ovld __cnfn clamp(int2 x, int minval, int maxval);
+uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval);
+int3 __ovld __cnfn clamp(int3 x, int minval, int maxval);
+uint3 __ovld __cnfn clamp(uint3 x, uint minval, uint maxval);
+int4 __ovld __cnfn clamp(int4 x, int minval, int maxval);
+uint4 __ovld __cnfn clamp(uint4 x, uint minval, uint maxval);
+int8 __ovld __cnfn clamp(int8 x, int minval, int maxval);
+uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval);
+int16 __ovld __cnfn clamp(int16 x, int minval, int maxval);
+uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval);
+long __ovld __cnfn clamp(long x, long minval, long maxval);
+ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
+long2 __ovld __cnfn clamp(long2 x, long minval, long maxval);
+ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval);
+long3 __ovld __cnfn clamp(long3 x, long minval, long maxval);
+ulong3 __ovld __cnfn clamp(ulong3 x, ulong minval, ulong maxval);
+long4 __ovld __cnfn clamp(long4 x, long minval, long maxval);
+ulong4 __ovld __cnfn clamp(ulong4 x, ulong minval, ulong maxval);
+long8 __ovld __cnfn clamp(long8 x, long minval, long maxval);
+ulong8 __ovld __cnfn clamp(ulong8 x, ulong minval, ulong maxval);
+long16 __ovld __cnfn clamp(long16 x, long minval, long maxval);
+ulong16 __ovld __cnfn clamp(ulong16 x, ulong minval, ulong maxval);
+
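+/*
+ * Illustrative sketch (not part of the upstream header): the second block of
+ * overloads lets a vector be clamped against scalar bounds, which is handy
+ * when the limits are uniform. The kernel below is hypothetical; the bounds
+ * must satisfy minval <= maxval or the result is undefined.
+ *
+ *   __kernel void clamp_indices(__global int4 *idx, int count) {
+ *     size_t i = get_global_id(0);
+ *     idx[i] = clamp(idx[i], 0, count - 1);   // int4 clamped to [0, count-1]
+ *   }
+ */
+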
+/**
+ * Returns the number of leading 0-bits in x, starting
+ * at the most significant bit position.
+ */
+char __ovld __cnfn clz(char x);
+uchar __ovld __cnfn clz(uchar x);
+char2 __ovld __cnfn clz(char2 x);
+uchar2 __ovld __cnfn clz(uchar2 x);
+char3 __ovld __cnfn clz(char3 x);
+uchar3 __ovld __cnfn clz(uchar3 x);
+char4 __ovld __cnfn clz(char4 x);
+uchar4 __ovld __cnfn clz(uchar4 x);
+char8 __ovld __cnfn clz(char8 x);
+uchar8 __ovld __cnfn clz(uchar8 x);
+char16 __ovld __cnfn clz(char16 x);
+uchar16 __ovld __cnfn clz(uchar16 x);
+short __ovld __cnfn clz(short x);
+ushort __ovld __cnfn clz(ushort x);
+short2 __ovld __cnfn clz(short2 x);
+ushort2 __ovld __cnfn clz(ushort2 x);
+short3 __ovld __cnfn clz(short3 x);
+ushort3 __ovld __cnfn clz(ushort3 x);
+short4 __ovld __cnfn clz(short4 x);
+ushort4 __ovld __cnfn clz(ushort4 x);
+short8 __ovld __cnfn clz(short8 x);
+ushort8 __ovld __cnfn clz(ushort8 x);
+short16 __ovld __cnfn clz(short16 x);
+ushort16 __ovld __cnfn clz(ushort16 x);
+int __ovld __cnfn clz(int x);
+uint __ovld __cnfn clz(uint x);
+int2 __ovld __cnfn clz(int2 x);
+uint2 __ovld __cnfn clz(uint2 x);
+int3 __ovld __cnfn clz(int3 x);
+uint3 __ovld __cnfn clz(uint3 x);
+int4 __ovld __cnfn clz(int4 x);
+uint4 __ovld __cnfn clz(uint4 x);
+int8 __ovld __cnfn clz(int8 x);
+uint8 __ovld __cnfn clz(uint8 x);
+int16 __ovld __cnfn clz(int16 x);
+uint16 __ovld __cnfn clz(uint16 x);
+long __ovld __cnfn clz(long x);
+ulong __ovld __cnfn clz(ulong x);
+long2 __ovld __cnfn clz(long2 x);
+ulong2 __ovld __cnfn clz(ulong2 x);
+long3 __ovld __cnfn clz(long3 x);
+ulong3 __ovld __cnfn clz(ulong3 x);
+long4 __ovld __cnfn clz(long4 x);
+ulong4 __ovld __cnfn clz(ulong4 x);
+long8 __ovld __cnfn clz(long8 x);
+ulong8 __ovld __cnfn clz(ulong8 x);
+long16 __ovld __cnfn clz(long16 x);
+ulong16 __ovld __cnfn clz(ulong16 x);
+
+/**
+ * Returns the count of trailing 0-bits in x. If x is 0,
+ * returns the size in bits of the type of x or
+ * component type of x, if x is a vector.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+char __ovld ctz(char x);
+uchar __ovld ctz(uchar x);
+char2 __ovld ctz(char2 x);
+uchar2 __ovld ctz(uchar2 x);
+char3 __ovld ctz(char3 x);
+uchar3 __ovld ctz(uchar3 x);
+char4 __ovld ctz(char4 x);
+uchar4 __ovld ctz(uchar4 x);
+char8 __ovld ctz(char8 x);
+uchar8 __ovld ctz(uchar8 x);
+char16 __ovld ctz(char16 x);
+uchar16 __ovld ctz(uchar16 x);
+short __ovld ctz(short x);
+ushort __ovld ctz(ushort x);
+short2 __ovld ctz(short2 x);
+ushort2 __ovld ctz(ushort2 x);
+short3 __ovld ctz(short3 x);
+ushort3 __ovld ctz(ushort3 x);
+short4 __ovld ctz(short4 x);
+ushort4 __ovld ctz(ushort4 x);
+short8 __ovld ctz(short8 x);
+ushort8 __ovld ctz(ushort8 x);
+short16 __ovld ctz(short16 x);
+ushort16 __ovld ctz(ushort16 x);
+int __ovld ctz(int x);
+uint __ovld ctz(uint x);
+int2 __ovld ctz(int2 x);
+uint2 __ovld ctz(uint2 x);
+int3 __ovld ctz(int3 x);
+uint3 __ovld ctz(uint3 x);
+int4 __ovld ctz(int4 x);
+uint4 __ovld ctz(uint4 x);
+int8 __ovld ctz(int8 x);
+uint8 __ovld ctz(uint8 x);
+int16 __ovld ctz(int16 x);
+uint16 __ovld ctz(uint16 x);
+long __ovld ctz(long x);
+ulong __ovld ctz(ulong x);
+long2 __ovld ctz(long2 x);
+ulong2 __ovld ctz(ulong2 x);
+long3 __ovld ctz(long3 x);
+ulong3 __ovld ctz(ulong3 x);
+long4 __ovld ctz(long4 x);
+ulong4 __ovld ctz(ulong4 x);
+long8 __ovld ctz(long8 x);
+ulong8 __ovld ctz(ulong8 x);
+long16 __ovld ctz(long16 x);
+ulong16 __ovld ctz(ulong16 x);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
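+/*
+ * Illustrative sketch (not part of the upstream header): clz and ctz give
+ * quick bit-level facts such as the number of significant bits or the
+ * alignment of a value. ctz requires OpenCL C 2.0; the helper names below
+ * are hypothetical.
+ *
+ *   // number of bits needed to represent x (0 for x == 0)
+ *   uint bit_width(uint x) { return 32u - clz(x); }
+ *
+ *   // largest power of two dividing x (x non-zero, OpenCL C >= 2.0 only)
+ *   uint alignment_of(uint x) { return 1u << ctz(x); }
+ */
+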
+/**
+ * Returns mul_hi(a, b) + c.
+ */
+char __ovld __cnfn mad_hi(char a, char b, char c);
+uchar __ovld __cnfn mad_hi(uchar a, uchar b, uchar c);
+char2 __ovld __cnfn mad_hi(char2 a, char2 b, char2 c);
+uchar2 __ovld __cnfn mad_hi(uchar2 a, uchar2 b, uchar2 c);
+char3 __ovld __cnfn mad_hi(char3 a, char3 b, char3 c);
+uchar3 __ovld __cnfn mad_hi(uchar3 a, uchar3 b, uchar3 c);
+char4 __ovld __cnfn mad_hi(char4 a, char4 b, char4 c);
+uchar4 __ovld __cnfn mad_hi(uchar4 a, uchar4 b, uchar4 c);
+char8 __ovld __cnfn mad_hi(char8 a, char8 b, char8 c);
+uchar8 __ovld __cnfn mad_hi(uchar8 a, uchar8 b, uchar8 c);
+char16 __ovld __cnfn mad_hi(char16 a, char16 b, char16 c);
+uchar16 __ovld __cnfn mad_hi(uchar16 a, uchar16 b, uchar16 c);
+short __ovld __cnfn mad_hi(short a, short b, short c);
+ushort __ovld __cnfn mad_hi(ushort a, ushort b, ushort c);
+short2 __ovld __cnfn mad_hi(short2 a, short2 b, short2 c);
+ushort2 __ovld __cnfn mad_hi(ushort2 a, ushort2 b, ushort2 c);
+short3 __ovld __cnfn mad_hi(short3 a, short3 b, short3 c);
+ushort3 __ovld __cnfn mad_hi(ushort3 a, ushort3 b, ushort3 c);
+short4 __ovld __cnfn mad_hi(short4 a, short4 b, short4 c);
+ushort4 __ovld __cnfn mad_hi(ushort4 a, ushort4 b, ushort4 c);
+short8 __ovld __cnfn mad_hi(short8 a, short8 b, short8 c);
+ushort8 __ovld __cnfn mad_hi(ushort8 a, ushort8 b, ushort8 c);
+short16 __ovld __cnfn mad_hi(short16 a, short16 b, short16 c);
+ushort16 __ovld __cnfn mad_hi(ushort16 a, ushort16 b, ushort16 c);
+int __ovld __cnfn mad_hi(int a, int b, int c);
+uint __ovld __cnfn mad_hi(uint a, uint b, uint c);
+int2 __ovld __cnfn mad_hi(int2 a, int2 b, int2 c);
+uint2 __ovld __cnfn mad_hi(uint2 a, uint2 b, uint2 c);
+int3 __ovld __cnfn mad_hi(int3 a, int3 b, int3 c);
+uint3 __ovld __cnfn mad_hi(uint3 a, uint3 b, uint3 c);
+int4 __ovld __cnfn mad_hi(int4 a, int4 b, int4 c);
+uint4 __ovld __cnfn mad_hi(uint4 a, uint4 b, uint4 c);
+int8 __ovld __cnfn mad_hi(int8 a, int8 b, int8 c);
+uint8 __ovld __cnfn mad_hi(uint8 a, uint8 b, uint8 c);
+int16 __ovld __cnfn mad_hi(int16 a, int16 b, int16 c);
+uint16 __ovld __cnfn mad_hi(uint16 a, uint16 b, uint16 c);
+long __ovld __cnfn mad_hi(long a, long b, long c);
+ulong __ovld __cnfn mad_hi(ulong a, ulong b, ulong c);
+long2 __ovld __cnfn mad_hi(long2 a, long2 b, long2 c);
+ulong2 __ovld __cnfn mad_hi(ulong2 a, ulong2 b, ulong2 c);
+long3 __ovld __cnfn mad_hi(long3 a, long3 b, long3 c);
+ulong3 __ovld __cnfn mad_hi(ulong3 a, ulong3 b, ulong3 c);
+long4 __ovld __cnfn mad_hi(long4 a, long4 b, long4 c);
+ulong4 __ovld __cnfn mad_hi(ulong4 a, ulong4 b, ulong4 c);
+long8 __ovld __cnfn mad_hi(long8 a, long8 b, long8 c);
+ulong8 __ovld __cnfn mad_hi(ulong8 a, ulong8 b, ulong8 c);
+long16 __ovld __cnfn mad_hi(long16 a, long16 b, long16 c);
+ulong16 __ovld __cnfn mad_hi(ulong16 a, ulong16 b, ulong16 c);
+
+/**
+ * Returns a * b + c and saturates the result.
+ */
+char __ovld __cnfn mad_sat(char a, char b, char c);
+uchar __ovld __cnfn mad_sat(uchar a, uchar b, uchar c);
+char2 __ovld __cnfn mad_sat(char2 a, char2 b, char2 c);
+uchar2 __ovld __cnfn mad_sat(uchar2 a, uchar2 b, uchar2 c);
+char3 __ovld __cnfn mad_sat(char3 a, char3 b, char3 c);
+uchar3 __ovld __cnfn mad_sat(uchar3 a, uchar3 b, uchar3 c);
+char4 __ovld __cnfn mad_sat(char4 a, char4 b, char4 c);
+uchar4 __ovld __cnfn mad_sat(uchar4 a, uchar4 b, uchar4 c);
+char8 __ovld __cnfn mad_sat(char8 a, char8 b, char8 c);
+uchar8 __ovld __cnfn mad_sat(uchar8 a, uchar8 b, uchar8 c);
+char16 __ovld __cnfn mad_sat(char16 a, char16 b, char16 c);
+uchar16 __ovld __cnfn mad_sat(uchar16 a, uchar16 b, uchar16 c);
+short __ovld __cnfn mad_sat(short a, short b, short c);
+ushort __ovld __cnfn mad_sat(ushort a, ushort b, ushort c);
+short2 __ovld __cnfn mad_sat(short2 a, short2 b, short2 c);
+ushort2 __ovld __cnfn mad_sat(ushort2 a, ushort2 b, ushort2 c);
+short3 __ovld __cnfn mad_sat(short3 a, short3 b, short3 c);
+ushort3 __ovld __cnfn mad_sat(ushort3 a, ushort3 b, ushort3 c);
+short4 __ovld __cnfn mad_sat(short4 a, short4 b, short4 c);
+ushort4 __ovld __cnfn mad_sat(ushort4 a, ushort4 b, ushort4 c);
+short8 __ovld __cnfn mad_sat(short8 a, short8 b, short8 c);
+ushort8 __ovld __cnfn mad_sat(ushort8 a, ushort8 b, ushort8 c);
+short16 __ovld __cnfn mad_sat(short16 a, short16 b, short16 c);
+ushort16 __ovld __cnfn mad_sat(ushort16 a, ushort16 b, ushort16 c);
+int __ovld __cnfn mad_sat(int a, int b, int c);
+uint __ovld __cnfn mad_sat(uint a, uint b, uint c);
+int2 __ovld __cnfn mad_sat(int2 a, int2 b, int2 c);
+uint2 __ovld __cnfn mad_sat(uint2 a, uint2 b, uint2 c);
+int3 __ovld __cnfn mad_sat(int3 a, int3 b, int3 c);
+uint3 __ovld __cnfn mad_sat(uint3 a, uint3 b, uint3 c);
+int4 __ovld __cnfn mad_sat(int4 a, int4 b, int4 c);
+uint4 __ovld __cnfn mad_sat(uint4 a, uint4 b, uint4 c);
+int8 __ovld __cnfn mad_sat(int8 a, int8 b, int8 c);
+uint8 __ovld __cnfn mad_sat(uint8 a, uint8 b, uint8 c);
+int16 __ovld __cnfn mad_sat(int16 a, int16 b, int16 c);
+uint16 __ovld __cnfn mad_sat(uint16 a, uint16 b, uint16 c);
+long __ovld __cnfn mad_sat(long a, long b, long c);
+ulong __ovld __cnfn mad_sat(ulong a, ulong b, ulong c);
+long2 __ovld __cnfn mad_sat(long2 a, long2 b, long2 c);
+ulong2 __ovld __cnfn mad_sat(ulong2 a, ulong2 b, ulong2 c);
+long3 __ovld __cnfn mad_sat(long3 a, long3 b, long3 c);
+ulong3 __ovld __cnfn mad_sat(ulong3 a, ulong3 b, ulong3 c);
+long4 __ovld __cnfn mad_sat(long4 a, long4 b, long4 c);
+ulong4 __ovld __cnfn mad_sat(ulong4 a, ulong4 b, ulong4 c);
+long8 __ovld __cnfn mad_sat(long8 a, long8 b, long8 c);
+ulong8 __ovld __cnfn mad_sat(ulong8 a, ulong8 b, ulong8 c);
+long16 __ovld __cnfn mad_sat(long16 a, long16 b, long16 c);
+ulong16 __ovld __cnfn mad_sat(ulong16 a, ulong16 b, ulong16 c);
+
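+/*
+ * Illustrative sketch (not part of the upstream header): mad_sat folds a
+ * multiply and an add into one saturating step, a common pattern in
+ * fixed-point filters; mad_hi does the same using the high half of the
+ * product. Hypothetical scale-and-bias over short samples:
+ *
+ *   __kernel void scale_bias(__global short *s, short gain, short bias) {
+ *     size_t i = get_global_id(0);
+ *     // clamps to [SHRT_MIN, SHRT_MAX] instead of wrapping
+ *     s[i] = mad_sat(s[i], gain, bias);
+ *   }
+ */
+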
+/**
+ * Returns y if x < y, otherwise it returns x.
+ */
+char __ovld __cnfn max(char x, char y);
+uchar __ovld __cnfn max(uchar x, uchar y);
+char2 __ovld __cnfn max(char2 x, char2 y);
+uchar2 __ovld __cnfn max(uchar2 x, uchar2 y);
+char3 __ovld __cnfn max(char3 x, char3 y);
+uchar3 __ovld __cnfn max(uchar3 x, uchar3 y);
+char4 __ovld __cnfn max(char4 x, char4 y);
+uchar4 __ovld __cnfn max(uchar4 x, uchar4 y);
+char8 __ovld __cnfn max(char8 x, char8 y);
+uchar8 __ovld __cnfn max(uchar8 x, uchar8 y);
+char16 __ovld __cnfn max(char16 x, char16 y);
+uchar16 __ovld __cnfn max(uchar16 x, uchar16 y);
+short __ovld __cnfn max(short x, short y);
+ushort __ovld __cnfn max(ushort x, ushort y);
+short2 __ovld __cnfn max(short2 x, short2 y);
+ushort2 __ovld __cnfn max(ushort2 x, ushort2 y);
+short3 __ovld __cnfn max(short3 x, short3 y);
+ushort3 __ovld __cnfn max(ushort3 x, ushort3 y);
+short4 __ovld __cnfn max(short4 x, short4 y);
+ushort4 __ovld __cnfn max(ushort4 x, ushort4 y);
+short8 __ovld __cnfn max(short8 x, short8 y);
+ushort8 __ovld __cnfn max(ushort8 x, ushort8 y);
+short16 __ovld __cnfn max(short16 x, short16 y);
+ushort16 __ovld __cnfn max(ushort16 x, ushort16 y);
+int __ovld __cnfn max(int x, int y);
+uint __ovld __cnfn max(uint x, uint y);
+int2 __ovld __cnfn max(int2 x, int2 y);
+uint2 __ovld __cnfn max(uint2 x, uint2 y);
+int3 __ovld __cnfn max(int3 x, int3 y);
+uint3 __ovld __cnfn max(uint3 x, uint3 y);
+int4 __ovld __cnfn max(int4 x, int4 y);
+uint4 __ovld __cnfn max(uint4 x, uint4 y);
+int8 __ovld __cnfn max(int8 x, int8 y);
+uint8 __ovld __cnfn max(uint8 x, uint8 y);
+int16 __ovld __cnfn max(int16 x, int16 y);
+uint16 __ovld __cnfn max(uint16 x, uint16 y);
+long __ovld __cnfn max(long x, long y);
+ulong __ovld __cnfn max(ulong x, ulong y);
+long2 __ovld __cnfn max(long2 x, long2 y);
+ulong2 __ovld __cnfn max(ulong2 x, ulong2 y);
+long3 __ovld __cnfn max(long3 x, long3 y);
+ulong3 __ovld __cnfn max(ulong3 x, ulong3 y);
+long4 __ovld __cnfn max(long4 x, long4 y);
+ulong4 __ovld __cnfn max(ulong4 x, ulong4 y);
+long8 __ovld __cnfn max(long8 x, long8 y);
+ulong8 __ovld __cnfn max(ulong8 x, ulong8 y);
+long16 __ovld __cnfn max(long16 x, long16 y);
+ulong16 __ovld __cnfn max(ulong16 x, ulong16 y);
+char __ovld __cnfn max(char x, char y);
+uchar __ovld __cnfn max(uchar x, uchar y);
+char2 __ovld __cnfn max(char2 x, char y);
+uchar2 __ovld __cnfn max(uchar2 x, uchar y);
+char3 __ovld __cnfn max(char3 x, char y);
+uchar3 __ovld __cnfn max(uchar3 x, uchar y);
+char4 __ovld __cnfn max(char4 x, char y);
+uchar4 __ovld __cnfn max(uchar4 x, uchar y);
+char8 __ovld __cnfn max(char8 x, char y);
+uchar8 __ovld __cnfn max(uchar8 x, uchar y);
+char16 __ovld __cnfn max(char16 x, char y);
+uchar16 __ovld __cnfn max(uchar16 x, uchar y);
+short __ovld __cnfn max(short x, short y);
+ushort __ovld __cnfn max(ushort x, ushort y);
+short2 __ovld __cnfn max(short2 x, short y);
+ushort2 __ovld __cnfn max(ushort2 x, ushort y);
+short3 __ovld __cnfn max(short3 x, short y);
+ushort3 __ovld __cnfn max(ushort3 x, ushort y);
+short4 __ovld __cnfn max(short4 x, short y);
+ushort4 __ovld __cnfn max(ushort4 x, ushort y);
+short8 __ovld __cnfn max(short8 x, short y);
+ushort8 __ovld __cnfn max(ushort8 x, ushort y);
+short16 __ovld __cnfn max(short16 x, short y);
+ushort16 __ovld __cnfn max(ushort16 x, ushort y);
+int __ovld __cnfn max(int x, int y);
+uint __ovld __cnfn max(uint x, uint y);
+int2 __ovld __cnfn max(int2 x, int y);
+uint2 __ovld __cnfn max(uint2 x, uint y);
+int3 __ovld __cnfn max(int3 x, int y);
+uint3 __ovld __cnfn max(uint3 x, uint y);
+int4 __ovld __cnfn max(int4 x, int y);
+uint4 __ovld __cnfn max(uint4 x, uint y);
+int8 __ovld __cnfn max(int8 x, int y);
+uint8 __ovld __cnfn max(uint8 x, uint y);
+int16 __ovld __cnfn max(int16 x, int y);
+uint16 __ovld __cnfn max(uint16 x, uint y);
+long __ovld __cnfn max(long x, long y);
+ulong __ovld __cnfn max(ulong x, ulong y);
+long2 __ovld __cnfn max(long2 x, long y);
+ulong2 __ovld __cnfn max(ulong2 x, ulong y);
+long3 __ovld __cnfn max(long3 x, long y);
+ulong3 __ovld __cnfn max(ulong3 x, ulong y);
+long4 __ovld __cnfn max(long4 x, long y);
+ulong4 __ovld __cnfn max(ulong4 x, ulong y);
+long8 __ovld __cnfn max(long8 x, long y);
+ulong8 __ovld __cnfn max(ulong8 x, ulong y);
+long16 __ovld __cnfn max(long16 x, long y);
+ulong16 __ovld __cnfn max(ulong16 x, ulong y);
+
+/**
+ * Returns y if y < x, otherwise it returns x.
+ */
+char __ovld __cnfn min(char x, char y);
+uchar __ovld __cnfn min(uchar x, uchar y);
+char2 __ovld __cnfn min(char2 x, char2 y);
+uchar2 __ovld __cnfn min(uchar2 x, uchar2 y);
+char3 __ovld __cnfn min(char3 x, char3 y);
+uchar3 __ovld __cnfn min(uchar3 x, uchar3 y);
+char4 __ovld __cnfn min(char4 x, char4 y);
+uchar4 __ovld __cnfn min(uchar4 x, uchar4 y);
+char8 __ovld __cnfn min(char8 x, char8 y);
+uchar8 __ovld __cnfn min(uchar8 x, uchar8 y);
+char16 __ovld __cnfn min(char16 x, char16 y);
+uchar16 __ovld __cnfn min(uchar16 x, uchar16 y);
+short __ovld __cnfn min(short x, short y);
+ushort __ovld __cnfn min(ushort x, ushort y);
+short2 __ovld __cnfn min(short2 x, short2 y);
+ushort2 __ovld __cnfn min(ushort2 x, ushort2 y);
+short3 __ovld __cnfn min(short3 x, short3 y);
+ushort3 __ovld __cnfn min(ushort3 x, ushort3 y);
+short4 __ovld __cnfn min(short4 x, short4 y);
+ushort4 __ovld __cnfn min(ushort4 x, ushort4 y);
+short8 __ovld __cnfn min(short8 x, short8 y);
+ushort8 __ovld __cnfn min(ushort8 x, ushort8 y);
+short16 __ovld __cnfn min(short16 x, short16 y);
+ushort16 __ovld __cnfn min(ushort16 x, ushort16 y);
+int __ovld __cnfn min(int x, int y);
+uint __ovld __cnfn min(uint x, uint y);
+int2 __ovld __cnfn min(int2 x, int2 y);
+uint2 __ovld __cnfn min(uint2 x, uint2 y);
+int3 __ovld __cnfn min(int3 x, int3 y);
+uint3 __ovld __cnfn min(uint3 x, uint3 y);
+int4 __ovld __cnfn min(int4 x, int4 y);
+uint4 __ovld __cnfn min(uint4 x, uint4 y);
+int8 __ovld __cnfn min(int8 x, int8 y);
+uint8 __ovld __cnfn min(uint8 x, uint8 y);
+int16 __ovld __cnfn min(int16 x, int16 y);
+uint16 __ovld __cnfn min(uint16 x, uint16 y);
+long __ovld __cnfn min(long x, long y);
+ulong __ovld __cnfn min(ulong x, ulong y);
+long2 __ovld __cnfn min(long2 x, long2 y);
+ulong2 __ovld __cnfn min(ulong2 x, ulong2 y);
+long3 __ovld __cnfn min(long3 x, long3 y);
+ulong3 __ovld __cnfn min(ulong3 x, ulong3 y);
+long4 __ovld __cnfn min(long4 x, long4 y);
+ulong4 __ovld __cnfn min(ulong4 x, ulong4 y);
+long8 __ovld __cnfn min(long8 x, long8 y);
+ulong8 __ovld __cnfn min(ulong8 x, ulong8 y);
+long16 __ovld __cnfn min(long16 x, long16 y);
+ulong16 __ovld __cnfn min(ulong16 x, ulong16 y);
+char __ovld __cnfn min(char x, char y);
+uchar __ovld __cnfn min(uchar x, uchar y);
+char2 __ovld __cnfn min(char2 x, char y);
+uchar2 __ovld __cnfn min(uchar2 x, uchar y);
+char3 __ovld __cnfn min(char3 x, char y);
+uchar3 __ovld __cnfn min(uchar3 x, uchar y);
+char4 __ovld __cnfn min(char4 x, char y);
+uchar4 __ovld __cnfn min(uchar4 x, uchar y);
+char8 __ovld __cnfn min(char8 x, char y);
+uchar8 __ovld __cnfn min(uchar8 x, uchar y);
+char16 __ovld __cnfn min(char16 x, char y);
+uchar16 __ovld __cnfn min(uchar16 x, uchar y);
+short __ovld __cnfn min(short x, short y);
+ushort __ovld __cnfn min(ushort x, ushort y);
+short2 __ovld __cnfn min(short2 x, short y);
+ushort2 __ovld __cnfn min(ushort2 x, ushort y);
+short3 __ovld __cnfn min(short3 x, short y);
+ushort3 __ovld __cnfn min(ushort3 x, ushort y);
+short4 __ovld __cnfn min(short4 x, short y);
+ushort4 __ovld __cnfn min(ushort4 x, ushort y);
+short8 __ovld __cnfn min(short8 x, short y);
+ushort8 __ovld __cnfn min(ushort8 x, ushort y);
+short16 __ovld __cnfn min(short16 x, short y);
+ushort16 __ovld __cnfn min(ushort16 x, ushort y);
+int __ovld __cnfn min(int x, int y);
+uint __ovld __cnfn min(uint x, uint y);
+int2 __ovld __cnfn min(int2 x, int y);
+uint2 __ovld __cnfn min(uint2 x, uint y);
+int3 __ovld __cnfn min(int3 x, int y);
+uint3 __ovld __cnfn min(uint3 x, uint y);
+int4 __ovld __cnfn min(int4 x, int y);
+uint4 __ovld __cnfn min(uint4 x, uint y);
+int8 __ovld __cnfn min(int8 x, int y);
+uint8 __ovld __cnfn min(uint8 x, uint y);
+int16 __ovld __cnfn min(int16 x, int y);
+uint16 __ovld __cnfn min(uint16 x, uint y);
+long __ovld __cnfn min(long x, long y);
+ulong __ovld __cnfn min(ulong x, ulong y);
+long2 __ovld __cnfn min(long2 x, long y);
+ulong2 __ovld __cnfn min(ulong2 x, ulong y);
+long3 __ovld __cnfn min(long3 x, long y);
+ulong3 __ovld __cnfn min(ulong3 x, ulong y);
+long4 __ovld __cnfn min(long4 x, long y);
+ulong4 __ovld __cnfn min(ulong4 x, ulong y);
+long8 __ovld __cnfn min(long8 x, long y);
+ulong8 __ovld __cnfn min(ulong8 x, ulong y);
+long16 __ovld __cnfn min(long16 x, long y);
+ulong16 __ovld __cnfn min(ulong16 x, ulong y);
+
+/**
+ * Computes x * y and returns the high half of the
+ * product of x and y.
+ */
+char __ovld __cnfn mul_hi(char x, char y);
+uchar __ovld __cnfn mul_hi(uchar x, uchar y);
+char2 __ovld __cnfn mul_hi(char2 x, char2 y);
+uchar2 __ovld __cnfn mul_hi(uchar2 x, uchar2 y);
+char3 __ovld __cnfn mul_hi(char3 x, char3 y);
+uchar3 __ovld __cnfn mul_hi(uchar3 x, uchar3 y);
+char4 __ovld __cnfn mul_hi(char4 x, char4 y);
+uchar4 __ovld __cnfn mul_hi(uchar4 x, uchar4 y);
+char8 __ovld __cnfn mul_hi(char8 x, char8 y);
+uchar8 __ovld __cnfn mul_hi(uchar8 x, uchar8 y);
+char16 __ovld __cnfn mul_hi(char16 x, char16 y);
+uchar16 __ovld __cnfn mul_hi(uchar16 x, uchar16 y);
+short __ovld __cnfn mul_hi(short x, short y);
+ushort __ovld __cnfn mul_hi(ushort x, ushort y);
+short2 __ovld __cnfn mul_hi(short2 x, short2 y);
+ushort2 __ovld __cnfn mul_hi(ushort2 x, ushort2 y);
+short3 __ovld __cnfn mul_hi(short3 x, short3 y);
+ushort3 __ovld __cnfn mul_hi(ushort3 x, ushort3 y);
+short4 __ovld __cnfn mul_hi(short4 x, short4 y);
+ushort4 __ovld __cnfn mul_hi(ushort4 x, ushort4 y);
+short8 __ovld __cnfn mul_hi(short8 x, short8 y);
+ushort8 __ovld __cnfn mul_hi(ushort8 x, ushort8 y);
+short16 __ovld __cnfn mul_hi(short16 x, short16 y);
+ushort16 __ovld __cnfn mul_hi(ushort16 x, ushort16 y);
+int __ovld __cnfn mul_hi(int x, int y);
+uint __ovld __cnfn mul_hi(uint x, uint y);
+int2 __ovld __cnfn mul_hi(int2 x, int2 y);
+uint2 __ovld __cnfn mul_hi(uint2 x, uint2 y);
+int3 __ovld __cnfn mul_hi(int3 x, int3 y);
+uint3 __ovld __cnfn mul_hi(uint3 x, uint3 y);
+int4 __ovld __cnfn mul_hi(int4 x, int4 y);
+uint4 __ovld __cnfn mul_hi(uint4 x, uint4 y);
+int8 __ovld __cnfn mul_hi(int8 x, int8 y);
+uint8 __ovld __cnfn mul_hi(uint8 x, uint8 y);
+int16 __ovld __cnfn mul_hi(int16 x, int16 y);
+uint16 __ovld __cnfn mul_hi(uint16 x, uint16 y);
+long __ovld __cnfn mul_hi(long x, long y);
+ulong __ovld __cnfn mul_hi(ulong x, ulong y);
+long2 __ovld __cnfn mul_hi(long2 x, long2 y);
+ulong2 __ovld __cnfn mul_hi(ulong2 x, ulong2 y);
+long3 __ovld __cnfn mul_hi(long3 x, long3 y);
+ulong3 __ovld __cnfn mul_hi(ulong3 x, ulong3 y);
+long4 __ovld __cnfn mul_hi(long4 x, long4 y);
+ulong4 __ovld __cnfn mul_hi(ulong4 x, ulong4 y);
+long8 __ovld __cnfn mul_hi(long8 x, long8 y);
+ulong8 __ovld __cnfn mul_hi(ulong8 x, ulong8 y);
+long16 __ovld __cnfn mul_hi(long16 x, long16 y);
+ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y);
+
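+/*
+ * Illustrative sketch (not part of the upstream header): mul_hi supplies the
+ * upper half of the full-width product, so together with the ordinary
+ * multiply it reconstructs a 64-bit result from two 32-bit operands.
+ * Hypothetical helper:
+ *
+ *   ulong full_mul32(uint a, uint b) {
+ *     uint lo = a * b;              // low 32 bits (wraps as usual)
+ *     uint hi = mul_hi(a, b);       // high 32 bits of the product
+ *     return ((ulong)hi << 32) | lo;
+ *   }
+ */
+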
+/**
+ * For each element in v, the bits are shifted left by
+ * the number of bits given by the corresponding
+ * element in i (subject to usual shift modulo rules
+ * described in section 6.3). Bits shifted off the left
+ * side of the element are shifted back in from the
+ * right.
+ */
+char __ovld __cnfn rotate(char v, char i);
+uchar __ovld __cnfn rotate(uchar v, uchar i);
+char2 __ovld __cnfn rotate(char2 v, char2 i);
+uchar2 __ovld __cnfn rotate(uchar2 v, uchar2 i);
+char3 __ovld __cnfn rotate(char3 v, char3 i);
+uchar3 __ovld __cnfn rotate(uchar3 v, uchar3 i);
+char4 __ovld __cnfn rotate(char4 v, char4 i);
+uchar4 __ovld __cnfn rotate(uchar4 v, uchar4 i);
+char8 __ovld __cnfn rotate(char8 v, char8 i);
+uchar8 __ovld __cnfn rotate(uchar8 v, uchar8 i);
+char16 __ovld __cnfn rotate(char16 v, char16 i);
+uchar16 __ovld __cnfn rotate(uchar16 v, uchar16 i);
+short __ovld __cnfn rotate(short v, short i);
+ushort __ovld __cnfn rotate(ushort v, ushort i);
+short2 __ovld __cnfn rotate(short2 v, short2 i);
+ushort2 __ovld __cnfn rotate(ushort2 v, ushort2 i);
+short3 __ovld __cnfn rotate(short3 v, short3 i);
+ushort3 __ovld __cnfn rotate(ushort3 v, ushort3 i);
+short4 __ovld __cnfn rotate(short4 v, short4 i);
+ushort4 __ovld __cnfn rotate(ushort4 v, ushort4 i);
+short8 __ovld __cnfn rotate(short8 v, short8 i);
+ushort8 __ovld __cnfn rotate(ushort8 v, ushort8 i);
+short16 __ovld __cnfn rotate(short16 v, short16 i);
+ushort16 __ovld __cnfn rotate(ushort16 v, ushort16 i);
+int __ovld __cnfn rotate(int v, int i);
+uint __ovld __cnfn rotate(uint v, uint i);
+int2 __ovld __cnfn rotate(int2 v, int2 i);
+uint2 __ovld __cnfn rotate(uint2 v, uint2 i);
+int3 __ovld __cnfn rotate(int3 v, int3 i);
+uint3 __ovld __cnfn rotate(uint3 v, uint3 i);
+int4 __ovld __cnfn rotate(int4 v, int4 i);
+uint4 __ovld __cnfn rotate(uint4 v, uint4 i);
+int8 __ovld __cnfn rotate(int8 v, int8 i);
+uint8 __ovld __cnfn rotate(uint8 v, uint8 i);
+int16 __ovld __cnfn rotate(int16 v, int16 i);
+uint16 __ovld __cnfn rotate(uint16 v, uint16 i);
+long __ovld __cnfn rotate(long v, long i);
+ulong __ovld __cnfn rotate(ulong v, ulong i);
+long2 __ovld __cnfn rotate(long2 v, long2 i);
+ulong2 __ovld __cnfn rotate(ulong2 v, ulong2 i);
+long3 __ovld __cnfn rotate(long3 v, long3 i);
+ulong3 __ovld __cnfn rotate(ulong3 v, ulong3 i);
+long4 __ovld __cnfn rotate(long4 v, long4 i);
+ulong4 __ovld __cnfn rotate(ulong4 v, ulong4 i);
+long8 __ovld __cnfn rotate(long8 v, long8 i);
+ulong8 __ovld __cnfn rotate(ulong8 v, ulong8 i);
+long16 __ovld __cnfn rotate(long16 v, long16 i);
+ulong16 __ovld __cnfn rotate(ulong16 v, ulong16 i);
+
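+/*
+ * Illustrative sketch (not part of the upstream header): rotate is a circular
+ * shift, so bits leaving on the left re-enter on the right, the primitive
+ * behind many hash and PRNG mixing steps. Hypothetical mixing helper:
+ *
+ *   uint mix(uint h, uint v) {
+ *     h ^= v;
+ *     return rotate(h, 13u) * 5u + 0xe6546b64u;   // rotate left by 13 bits
+ *   }
+ */
+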
+/**
+ * Returns x - y and saturates the result.
+ */
+char __ovld __cnfn sub_sat(char x, char y);
+uchar __ovld __cnfn sub_sat(uchar x, uchar y);
+char2 __ovld __cnfn sub_sat(char2 x, char2 y);
+uchar2 __ovld __cnfn sub_sat(uchar2 x, uchar2 y);
+char3 __ovld __cnfn sub_sat(char3 x, char3 y);
+uchar3 __ovld __cnfn sub_sat(uchar3 x, uchar3 y);
+char4 __ovld __cnfn sub_sat(char4 x, char4 y);
+uchar4 __ovld __cnfn sub_sat(uchar4 x, uchar4 y);
+char8 __ovld __cnfn sub_sat(char8 x, char8 y);
+uchar8 __ovld __cnfn sub_sat(uchar8 x, uchar8 y);
+char16 __ovld __cnfn sub_sat(char16 x, char16 y);
+uchar16 __ovld __cnfn sub_sat(uchar16 x, uchar16 y);
+short __ovld __cnfn sub_sat(short x, short y);
+ushort __ovld __cnfn sub_sat(ushort x, ushort y);
+short2 __ovld __cnfn sub_sat(short2 x, short2 y);
+ushort2 __ovld __cnfn sub_sat(ushort2 x, ushort2 y);
+short3 __ovld __cnfn sub_sat(short3 x, short3 y);
+ushort3 __ovld __cnfn sub_sat(ushort3 x, ushort3 y);
+short4 __ovld __cnfn sub_sat(short4 x, short4 y);
+ushort4 __ovld __cnfn sub_sat(ushort4 x, ushort4 y);
+short8 __ovld __cnfn sub_sat(short8 x, short8 y);
+ushort8 __ovld __cnfn sub_sat(ushort8 x, ushort8 y);
+short16 __ovld __cnfn sub_sat(short16 x, short16 y);
+ushort16 __ovld __cnfn sub_sat(ushort16 x, ushort16 y);
+int __ovld __cnfn sub_sat(int x, int y);
+uint __ovld __cnfn sub_sat(uint x, uint y);
+int2 __ovld __cnfn sub_sat(int2 x, int2 y);
+uint2 __ovld __cnfn sub_sat(uint2 x, uint2 y);
+int3 __ovld __cnfn sub_sat(int3 x, int3 y);
+uint3 __ovld __cnfn sub_sat(uint3 x, uint3 y);
+int4 __ovld __cnfn sub_sat(int4 x, int4 y);
+uint4 __ovld __cnfn sub_sat(uint4 x, uint4 y);
+int8 __ovld __cnfn sub_sat(int8 x, int8 y);
+uint8 __ovld __cnfn sub_sat(uint8 x, uint8 y);
+int16 __ovld __cnfn sub_sat(int16 x, int16 y);
+uint16 __ovld __cnfn sub_sat(uint16 x, uint16 y);
+long __ovld __cnfn sub_sat(long x, long y);
+ulong __ovld __cnfn sub_sat(ulong x, ulong y);
+long2 __ovld __cnfn sub_sat(long2 x, long2 y);
+ulong2 __ovld __cnfn sub_sat(ulong2 x, ulong2 y);
+long3 __ovld __cnfn sub_sat(long3 x, long3 y);
+ulong3 __ovld __cnfn sub_sat(ulong3 x, ulong3 y);
+long4 __ovld __cnfn sub_sat(long4 x, long4 y);
+ulong4 __ovld __cnfn sub_sat(ulong4 x, ulong4 y);
+long8 __ovld __cnfn sub_sat(long8 x, long8 y);
+ulong8 __ovld __cnfn sub_sat(ulong8 x, ulong8 y);
+long16 __ovld __cnfn sub_sat(long16 x, long16 y);
+ulong16 __ovld __cnfn sub_sat(ulong16 x, ulong16 y);
+
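+/*
+ * Worked examples of the saturating subtraction above (illustrative):
+ * sub_sat((uchar)5, (uchar)10) == 0 (clamped to the uchar minimum), and
+ * sub_sat((char)-100, (char)100) == -128 (clamped to CHAR_MIN).
+ */
+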
+/**
+ * result[i] = ((short)hi[i] << 8) | lo[i]
+ * result[i] = ((ushort)hi[i] << 8) | lo[i]
+ */
+short __ovld __cnfn upsample(char hi, uchar lo);
+ushort __ovld __cnfn upsample(uchar hi, uchar lo);
+short2 __ovld __cnfn upsample(char2 hi, uchar2 lo);
+short3 __ovld __cnfn upsample(char3 hi, uchar3 lo);
+short4 __ovld __cnfn upsample(char4 hi, uchar4 lo);
+short8 __ovld __cnfn upsample(char8 hi, uchar8 lo);
+short16 __ovld __cnfn upsample(char16 hi, uchar16 lo);
+ushort2 __ovld __cnfn upsample(uchar2 hi, uchar2 lo);
+ushort3 __ovld __cnfn upsample(uchar3 hi, uchar3 lo);
+ushort4 __ovld __cnfn upsample(uchar4 hi, uchar4 lo);
+ushort8 __ovld __cnfn upsample(uchar8 hi, uchar8 lo);
+ushort16 __ovld __cnfn upsample(uchar16 hi, uchar16 lo);
+
+/**
+ * result[i] = ((int)hi[i] << 16) | lo[i]
+ * result[i] = ((uint)hi[i] << 16) | lo[i]
+ */
+int __ovld __cnfn upsample(short hi, ushort lo);
+uint __ovld __cnfn upsample(ushort hi, ushort lo);
+int2 __ovld __cnfn upsample(short2 hi, ushort2 lo);
+int3 __ovld __cnfn upsample(short3 hi, ushort3 lo);
+int4 __ovld __cnfn upsample(short4 hi, ushort4 lo);
+int8 __ovld __cnfn upsample(short8 hi, ushort8 lo);
+int16 __ovld __cnfn upsample(short16 hi, ushort16 lo);
+uint2 __ovld __cnfn upsample(ushort2 hi, ushort2 lo);
+uint3 __ovld __cnfn upsample(ushort3 hi, ushort3 lo);
+uint4 __ovld __cnfn upsample(ushort4 hi, ushort4 lo);
+uint8 __ovld __cnfn upsample(ushort8 hi, ushort8 lo);
+uint16 __ovld __cnfn upsample(ushort16 hi, ushort16 lo);
+/**
+ * result[i] = ((long)hi[i] << 32) | lo[i]
+ * result[i] = ((ulong)hi[i] << 32) | lo[i]
+ */
+long __ovld __cnfn upsample(int hi, uint lo);
+ulong __ovld __cnfn upsample(uint hi, uint lo);
+long2 __ovld __cnfn upsample(int2 hi, uint2 lo);
+long3 __ovld __cnfn upsample(int3 hi, uint3 lo);
+long4 __ovld __cnfn upsample(int4 hi, uint4 lo);
+long8 __ovld __cnfn upsample(int8 hi, uint8 lo);
+long16 __ovld __cnfn upsample(int16 hi, uint16 lo);
+ulong2 __ovld __cnfn upsample(uint2 hi, uint2 lo);
+ulong3 __ovld __cnfn upsample(uint3 hi, uint3 lo);
+ulong4 __ovld __cnfn upsample(uint4 hi, uint4 lo);
+ulong8 __ovld __cnfn upsample(uint8 hi, uint8 lo);
+ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
+
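+/*
+ * Worked examples of the upsample formulas above (illustrative):
+ * upsample((uchar)0x12, (uchar)0x34) == (ushort)0x1234, and
+ * upsample((short)-1, (ushort)0xFFFF) == (int)-1, since the hi argument
+ * supplies the sign of the widened result.
+ */
+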
+/**
+ * popcount(x): returns the number of set bits in x.
+ */
+char __ovld __cnfn popcount(char x);
+uchar __ovld __cnfn popcount(uchar x);
+char2 __ovld __cnfn popcount(char2 x);
+uchar2 __ovld __cnfn popcount(uchar2 x);
+char3 __ovld __cnfn popcount(char3 x);
+uchar3 __ovld __cnfn popcount(uchar3 x);
+char4 __ovld __cnfn popcount(char4 x);
+uchar4 __ovld __cnfn popcount(uchar4 x);
+char8 __ovld __cnfn popcount(char8 x);
+uchar8 __ovld __cnfn popcount(uchar8 x);
+char16 __ovld __cnfn popcount(char16 x);
+uchar16 __ovld __cnfn popcount(uchar16 x);
+short __ovld __cnfn popcount(short x);
+ushort __ovld __cnfn popcount(ushort x);
+short2 __ovld __cnfn popcount(short2 x);
+ushort2 __ovld __cnfn popcount(ushort2 x);
+short3 __ovld __cnfn popcount(short3 x);
+ushort3 __ovld __cnfn popcount(ushort3 x);
+short4 __ovld __cnfn popcount(short4 x);
+ushort4 __ovld __cnfn popcount(ushort4 x);
+short8 __ovld __cnfn popcount(short8 x);
+ushort8 __ovld __cnfn popcount(ushort8 x);
+short16 __ovld __cnfn popcount(short16 x);
+ushort16 __ovld __cnfn popcount(ushort16 x);
+int __ovld __cnfn popcount(int x);
+uint __ovld __cnfn popcount(uint x);
+int2 __ovld __cnfn popcount(int2 x);
+uint2 __ovld __cnfn popcount(uint2 x);
+int3 __ovld __cnfn popcount(int3 x);
+uint3 __ovld __cnfn popcount(uint3 x);
+int4 __ovld __cnfn popcount(int4 x);
+uint4 __ovld __cnfn popcount(uint4 x);
+int8 __ovld __cnfn popcount(int8 x);
+uint8 __ovld __cnfn popcount(uint8 x);
+int16 __ovld __cnfn popcount(int16 x);
+uint16 __ovld __cnfn popcount(uint16 x);
+long __ovld __cnfn popcount(long x);
+ulong __ovld __cnfn popcount(ulong x);
+long2 __ovld __cnfn popcount(long2 x);
+ulong2 __ovld __cnfn popcount(ulong2 x);
+long3 __ovld __cnfn popcount(long3 x);
+ulong3 __ovld __cnfn popcount(ulong3 x);
+long4 __ovld __cnfn popcount(long4 x);
+ulong4 __ovld __cnfn popcount(ulong4 x);
+long8 __ovld __cnfn popcount(long8 x);
+ulong8 __ovld __cnfn popcount(ulong8 x);
+long16 __ovld __cnfn popcount(long16 x);
+ulong16 __ovld __cnfn popcount(ulong16 x);
+
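+/*
+ * Worked examples (illustrative): popcount((uchar)0xF0) == 4 and
+ * popcount((uint)0xFFFFFFFF) == 32.
+ */
+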
+/**
+ * Multiply two 24-bit integer values x and y and add
+ * the 32-bit integer result to the 32-bit integer z.
+ * Refer to definition of mul24 to see how the 24-bit
+ * integer multiplication is performed.
+ */
+int __ovld __cnfn mad24(int x, int y, int z);
+uint __ovld __cnfn mad24(uint x, uint y, uint z);
+int2 __ovld __cnfn mad24(int2 x, int2 y, int2 z);
+uint2 __ovld __cnfn mad24(uint2 x, uint2 y, uint2 z);
+int3 __ovld __cnfn mad24(int3 x, int3 y, int3 z);
+uint3 __ovld __cnfn mad24(uint3 x, uint3 y, uint3 z);
+int4 __ovld __cnfn mad24(int4 x, int4 y, int4 z);
+uint4 __ovld __cnfn mad24(uint4 x, uint4 y, uint4 z);
+int8 __ovld __cnfn mad24(int8 x, int8 y, int8 z);
+uint8 __ovld __cnfn mad24(uint8 x, uint8 y, uint8 z);
+int16 __ovld __cnfn mad24(int16 x, int16 y, int16 z);
+uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z);
+
+/**
+ * Multiply two 24-bit integer values x and y. x and y
+ * are 32-bit integers but only the low 24-bits are used
+ * to perform the multiplication. mul24 should only
+ * be used when values in x and y are in the range
+ * [-2^23, 2^23-1] if x and y are signed integers and in the
+ * range [0, 2^24-1] if x and y are unsigned integers. If
+ * x and y are not in this range, the multiplication
+ * result is implementation-defined.
+ */
+int __ovld __cnfn mul24(int x, int y);
+uint __ovld __cnfn mul24(uint x, uint y);
+int2 __ovld __cnfn mul24(int2 x, int2 y);
+uint2 __ovld __cnfn mul24(uint2 x, uint2 y);
+int3 __ovld __cnfn mul24(int3 x, int3 y);
+uint3 __ovld __cnfn mul24(uint3 x, uint3 y);
+int4 __ovld __cnfn mul24(int4 x, int4 y);
+uint4 __ovld __cnfn mul24(uint4 x, uint4 y);
+int8 __ovld __cnfn mul24(int8 x, int8 y);
+uint8 __ovld __cnfn mul24(uint8 x, uint8 y);
+int16 __ovld __cnfn mul24(int16 x, int16 y);
+uint16 __ovld __cnfn mul24(uint16 x, uint16 y);
+
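+/*
+ * Worked examples of mul24 and mad24 (illustrative, with operands inside
+ * the 24-bit ranges required above): mul24(1 << 10, 1 << 10) == 1 << 20
+ * and mad24(3, 4, 5) == 17.
+ */
+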
+// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions
+
+/**
+ * Returns fmin(fmax(x, minval), maxval).
+ * Results are undefined if minval > maxval.
+ */
+float __ovld __cnfn clamp(float x, float minval, float maxval);
+float2 __ovld __cnfn clamp(float2 x, float2 minval, float2 maxval);
+float3 __ovld __cnfn clamp(float3 x, float3 minval, float3 maxval);
+float4 __ovld __cnfn clamp(float4 x, float4 minval, float4 maxval);
+float8 __ovld __cnfn clamp(float8 x, float8 minval, float8 maxval);
+float16 __ovld __cnfn clamp(float16 x, float16 minval, float16 maxval);
+float2 __ovld __cnfn clamp(float2 x, float minval, float maxval);
+float3 __ovld __cnfn clamp(float3 x, float minval, float maxval);
+float4 __ovld __cnfn clamp(float4 x, float minval, float maxval);
+float8 __ovld __cnfn clamp(float8 x, float minval, float maxval);
+float16 __ovld __cnfn clamp(float16 x, float minval, float maxval);
+#ifdef cl_khr_fp64
+double __ovld __cnfn clamp(double x, double minval, double maxval);
+double2 __ovld __cnfn clamp(double2 x, double2 minval, double2 maxval);
+double3 __ovld __cnfn clamp(double3 x, double3 minval, double3 maxval);
+double4 __ovld __cnfn clamp(double4 x, double4 minval, double4 maxval);
+double8 __ovld __cnfn clamp(double8 x, double8 minval, double8 maxval);
+double16 __ovld __cnfn clamp(double16 x, double16 minval, double16 maxval);
+double2 __ovld __cnfn clamp(double2 x, double minval, double maxval);
+double3 __ovld __cnfn clamp(double3 x, double minval, double maxval);
+double4 __ovld __cnfn clamp(double4 x, double minval, double maxval);
+double8 __ovld __cnfn clamp(double8 x, double minval, double maxval);
+double16 __ovld __cnfn clamp(double16 x, double minval, double maxval);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn clamp(half x, half minval, half maxval);
+half2 __ovld __cnfn clamp(half2 x, half2 minval, half2 maxval);
+half3 __ovld __cnfn clamp(half3 x, half3 minval, half3 maxval);
+half4 __ovld __cnfn clamp(half4 x, half4 minval, half4 maxval);
+half8 __ovld __cnfn clamp(half8 x, half8 minval, half8 maxval);
+half16 __ovld __cnfn clamp(half16 x, half16 minval, half16 maxval);
+half2 __ovld __cnfn clamp(half2 x, half minval, half maxval);
+half3 __ovld __cnfn clamp(half3 x, half minval, half maxval);
+half4 __ovld __cnfn clamp(half4 x, half minval, half maxval);
+half8 __ovld __cnfn clamp(half8 x, half minval, half maxval);
+half16 __ovld __cnfn clamp(half16 x, half minval, half maxval);
+#endif //cl_khr_fp16
+
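+/*
+ * Worked examples of clamp (illustrative): clamp(5.0f, 0.0f, 1.0f) == 1.0f
+ * and clamp(-2.0f, 0.0f, 1.0f) == 0.0f.
+ */
+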
+/**
+ * Converts radians to degrees, i.e. (180 / PI) *
+ * radians.
+ */
+float __ovld __cnfn degrees(float radians);
+float2 __ovld __cnfn degrees(float2 radians);
+float3 __ovld __cnfn degrees(float3 radians);
+float4 __ovld __cnfn degrees(float4 radians);
+float8 __ovld __cnfn degrees(float8 radians);
+float16 __ovld __cnfn degrees(float16 radians);
+#ifdef cl_khr_fp64
+double __ovld __cnfn degrees(double radians);
+double2 __ovld __cnfn degrees(double2 radians);
+double3 __ovld __cnfn degrees(double3 radians);
+double4 __ovld __cnfn degrees(double4 radians);
+double8 __ovld __cnfn degrees(double8 radians);
+double16 __ovld __cnfn degrees(double16 radians);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn degrees(half radians);
+half2 __ovld __cnfn degrees(half2 radians);
+half3 __ovld __cnfn degrees(half3 radians);
+half4 __ovld __cnfn degrees(half4 radians);
+half8 __ovld __cnfn degrees(half8 radians);
+half16 __ovld __cnfn degrees(half16 radians);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if x < y, otherwise it returns x. If x and y
+ * are infinite or NaN, the return values are undefined.
+ */
+float __ovld __cnfn max(float x, float y);
+float2 __ovld __cnfn max(float2 x, float2 y);
+float3 __ovld __cnfn max(float3 x, float3 y);
+float4 __ovld __cnfn max(float4 x, float4 y);
+float8 __ovld __cnfn max(float8 x, float8 y);
+float16 __ovld __cnfn max(float16 x, float16 y);
+float2 __ovld __cnfn max(float2 x, float y);
+float3 __ovld __cnfn max(float3 x, float y);
+float4 __ovld __cnfn max(float4 x, float y);
+float8 __ovld __cnfn max(float8 x, float y);
+float16 __ovld __cnfn max(float16 x, float y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn max(double x, double y);
+double2 __ovld __cnfn max(double2 x, double2 y);
+double3 __ovld __cnfn max(double3 x, double3 y);
+double4 __ovld __cnfn max(double4 x, double4 y);
+double8 __ovld __cnfn max(double8 x, double8 y);
+double16 __ovld __cnfn max(double16 x, double16 y);
+double2 __ovld __cnfn max(double2 x, double y);
+double3 __ovld __cnfn max(double3 x, double y);
+double4 __ovld __cnfn max(double4 x, double y);
+double8 __ovld __cnfn max(double8 x, double y);
+double16 __ovld __cnfn max(double16 x, double y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn max(half x, half y);
+half2 __ovld __cnfn max(half2 x, half2 y);
+half3 __ovld __cnfn max(half3 x, half3 y);
+half4 __ovld __cnfn max(half4 x, half4 y);
+half8 __ovld __cnfn max(half8 x, half8 y);
+half16 __ovld __cnfn max(half16 x, half16 y);
+half2 __ovld __cnfn max(half2 x, half y);
+half3 __ovld __cnfn max(half3 x, half y);
+half4 __ovld __cnfn max(half4 x, half y);
+half8 __ovld __cnfn max(half8 x, half y);
+half16 __ovld __cnfn max(half16 x, half y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if y < x, otherwise it returns x. If x and y
+ * are infinite or NaN, the return values are undefined.
+ */
+float __ovld __cnfn min(float x, float y);
+float2 __ovld __cnfn min(float2 x, float2 y);
+float3 __ovld __cnfn min(float3 x, float3 y);
+float4 __ovld __cnfn min(float4 x, float4 y);
+float8 __ovld __cnfn min(float8 x, float8 y);
+float16 __ovld __cnfn min(float16 x, float16 y);
+float2 __ovld __cnfn min(float2 x, float y);
+float3 __ovld __cnfn min(float3 x, float y);
+float4 __ovld __cnfn min(float4 x, float y);
+float8 __ovld __cnfn min(float8 x, float y);
+float16 __ovld __cnfn min(float16 x, float y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn min(double x, double y);
+double2 __ovld __cnfn min(double2 x, double2 y);
+double3 __ovld __cnfn min(double3 x, double3 y);
+double4 __ovld __cnfn min(double4 x, double4 y);
+double8 __ovld __cnfn min(double8 x, double8 y);
+double16 __ovld __cnfn min(double16 x, double16 y);
+double2 __ovld __cnfn min(double2 x, double y);
+double3 __ovld __cnfn min(double3 x, double y);
+double4 __ovld __cnfn min(double4 x, double y);
+double8 __ovld __cnfn min(double8 x, double y);
+double16 __ovld __cnfn min(double16 x, double y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn min(half x, half y);
+half2 __ovld __cnfn min(half2 x, half2 y);
+half3 __ovld __cnfn min(half3 x, half3 y);
+half4 __ovld __cnfn min(half4 x, half4 y);
+half8 __ovld __cnfn min(half8 x, half8 y);
+half16 __ovld __cnfn min(half16 x, half16 y);
+half2 __ovld __cnfn min(half2 x, half y);
+half3 __ovld __cnfn min(half3 x, half y);
+half4 __ovld __cnfn min(half4 x, half y);
+half8 __ovld __cnfn min(half8 x, half y);
+half16 __ovld __cnfn min(half16 x, half y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the linear blend of x & y implemented as:
+ * x + (y - x) * a
+ * a must be a value in the range 0.0 ... 1.0. If a is not
+ * in the range 0.0 ... 1.0, the return values are
+ * undefined.
+ */
+float __ovld __cnfn mix(float x, float y, float a);
+float2 __ovld __cnfn mix(float2 x, float2 y, float2 a);
+float3 __ovld __cnfn mix(float3 x, float3 y, float3 a);
+float4 __ovld __cnfn mix(float4 x, float4 y, float4 a);
+float8 __ovld __cnfn mix(float8 x, float8 y, float8 a);
+float16 __ovld __cnfn mix(float16 x, float16 y, float16 a);
+float2 __ovld __cnfn mix(float2 x, float2 y, float a);
+float3 __ovld __cnfn mix(float3 x, float3 y, float a);
+float4 __ovld __cnfn mix(float4 x, float4 y, float a);
+float8 __ovld __cnfn mix(float8 x, float8 y, float a);
+float16 __ovld __cnfn mix(float16 x, float16 y, float a);
+#ifdef cl_khr_fp64
+double __ovld __cnfn mix(double x, double y, double a);
+double2 __ovld __cnfn mix(double2 x, double2 y, double2 a);
+double3 __ovld __cnfn mix(double3 x, double3 y, double3 a);
+double4 __ovld __cnfn mix(double4 x, double4 y, double4 a);
+double8 __ovld __cnfn mix(double8 x, double8 y, double8 a);
+double16 __ovld __cnfn mix(double16 x, double16 y, double16 a);
+double2 __ovld __cnfn mix(double2 x, double2 y, double a);
+double3 __ovld __cnfn mix(double3 x, double3 y, double a);
+double4 __ovld __cnfn mix(double4 x, double4 y, double a);
+double8 __ovld __cnfn mix(double8 x, double8 y, double a);
+double16 __ovld __cnfn mix(double16 x, double16 y, double a);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn mix(half x, half y, half a);
+half2 __ovld __cnfn mix(half2 x, half2 y, half2 a);
+half3 __ovld __cnfn mix(half3 x, half3 y, half3 a);
+half4 __ovld __cnfn mix(half4 x, half4 y, half4 a);
+half8 __ovld __cnfn mix(half8 x, half8 y, half8 a);
+half16 __ovld __cnfn mix(half16 x, half16 y, half16 a);
+half2 __ovld __cnfn mix(half2 x, half2 y, half a);
+half3 __ovld __cnfn mix(half3 x, half3 y, half a);
+half4 __ovld __cnfn mix(half4 x, half4 y, half a);
+half8 __ovld __cnfn mix(half8 x, half8 y, half a);
+half16 __ovld __cnfn mix(half16 x, half16 y, half a);
+#endif //cl_khr_fp16
+
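+/*
+ * Worked example of the linear blend above (illustrative):
+ * mix(2.0f, 4.0f, 0.25f) == 2.0f + (4.0f - 2.0f) * 0.25f == 2.5f.
+ */
+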
+/**
+ * Converts degrees to radians, i.e. (PI / 180) *
+ * degrees.
+ */
+float __ovld __cnfn radians(float degrees);
+float2 __ovld __cnfn radians(float2 degrees);
+float3 __ovld __cnfn radians(float3 degrees);
+float4 __ovld __cnfn radians(float4 degrees);
+float8 __ovld __cnfn radians(float8 degrees);
+float16 __ovld __cnfn radians(float16 degrees);
+#ifdef cl_khr_fp64
+double __ovld __cnfn radians(double degrees);
+double2 __ovld __cnfn radians(double2 degrees);
+double3 __ovld __cnfn radians(double3 degrees);
+double4 __ovld __cnfn radians(double4 degrees);
+double8 __ovld __cnfn radians(double8 degrees);
+double16 __ovld __cnfn radians(double16 degrees);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn radians(half degrees);
+half2 __ovld __cnfn radians(half2 degrees);
+half3 __ovld __cnfn radians(half3 degrees);
+half4 __ovld __cnfn radians(half4 degrees);
+half8 __ovld __cnfn radians(half8 degrees);
+half16 __ovld __cnfn radians(half16 degrees);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 0.0 if x < edge, otherwise it returns 1.0.
+ */
+float __ovld __cnfn step(float edge, float x);
+float2 __ovld __cnfn step(float2 edge, float2 x);
+float3 __ovld __cnfn step(float3 edge, float3 x);
+float4 __ovld __cnfn step(float4 edge, float4 x);
+float8 __ovld __cnfn step(float8 edge, float8 x);
+float16 __ovld __cnfn step(float16 edge, float16 x);
+float2 __ovld __cnfn step(float edge, float2 x);
+float3 __ovld __cnfn step(float edge, float3 x);
+float4 __ovld __cnfn step(float edge, float4 x);
+float8 __ovld __cnfn step(float edge, float8 x);
+float16 __ovld __cnfn step(float edge, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn step(double edge, double x);
+double2 __ovld __cnfn step(double2 edge, double2 x);
+double3 __ovld __cnfn step(double3 edge, double3 x);
+double4 __ovld __cnfn step(double4 edge, double4 x);
+double8 __ovld __cnfn step(double8 edge, double8 x);
+double16 __ovld __cnfn step(double16 edge, double16 x);
+double2 __ovld __cnfn step(double edge, double2 x);
+double3 __ovld __cnfn step(double edge, double3 x);
+double4 __ovld __cnfn step(double edge, double4 x);
+double8 __ovld __cnfn step(double edge, double8 x);
+double16 __ovld __cnfn step(double edge, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn step(half edge, half x);
+half2 __ovld __cnfn step(half2 edge, half2 x);
+half3 __ovld __cnfn step(half3 edge, half3 x);
+half4 __ovld __cnfn step(half4 edge, half4 x);
+half8 __ovld __cnfn step(half8 edge, half8 x);
+half16 __ovld __cnfn step(half16 edge, half16 x);
+half2 __ovld __cnfn step(half edge, half2 x);
+half3 __ovld __cnfn step(half edge, half3 x);
+half4 __ovld __cnfn step(half edge, half4 x);
+half8 __ovld __cnfn step(half edge, half8 x);
+half16 __ovld __cnfn step(half edge, half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
+ * performs smooth Hermite interpolation between 0
+ * and 1 when edge0 < x < edge1. This is useful in
+ * cases where you would want a threshold function
+ * with a smooth transition.
+ * This is equivalent to:
+ * gentype t;
+ * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
+ * return t * t * (3 - 2 * t);
+ * Results are undefined if edge0 >= edge1 or if x,
+ * edge0 or edge1 is a NaN.
+ */
+float __ovld __cnfn smoothstep(float edge0, float edge1, float x);
+float2 __ovld __cnfn smoothstep(float2 edge0, float2 edge1, float2 x);
+float3 __ovld __cnfn smoothstep(float3 edge0, float3 edge1, float3 x);
+float4 __ovld __cnfn smoothstep(float4 edge0, float4 edge1, float4 x);
+float8 __ovld __cnfn smoothstep(float8 edge0, float8 edge1, float8 x);
+float16 __ovld __cnfn smoothstep(float16 edge0, float16 edge1, float16 x);
+float2 __ovld __cnfn smoothstep(float edge0, float edge1, float2 x);
+float3 __ovld __cnfn smoothstep(float edge0, float edge1, float3 x);
+float4 __ovld __cnfn smoothstep(float edge0, float edge1, float4 x);
+float8 __ovld __cnfn smoothstep(float edge0, float edge1, float8 x);
+float16 __ovld __cnfn smoothstep(float edge0, float edge1, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn smoothstep(double edge0, double edge1, double x);
+double2 __ovld __cnfn smoothstep(double2 edge0, double2 edge1, double2 x);
+double3 __ovld __cnfn smoothstep(double3 edge0, double3 edge1, double3 x);
+double4 __ovld __cnfn smoothstep(double4 edge0, double4 edge1, double4 x);
+double8 __ovld __cnfn smoothstep(double8 edge0, double8 edge1, double8 x);
+double16 __ovld __cnfn smoothstep(double16 edge0, double16 edge1, double16 x);
+double2 __ovld __cnfn smoothstep(double edge0, double edge1, double2 x);
+double3 __ovld __cnfn smoothstep(double edge0, double edge1, double3 x);
+double4 __ovld __cnfn smoothstep(double edge0, double edge1, double4 x);
+double8 __ovld __cnfn smoothstep(double edge0, double edge1, double8 x);
+double16 __ovld __cnfn smoothstep(double edge0, double edge1, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn smoothstep(half edge0, half edge1, half x);
+half2 __ovld __cnfn smoothstep(half2 edge0, half2 edge1, half2 x);
+half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x);
+half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x);
+half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x);
+half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x);
+half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x);
+half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x);
+half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x);
+half8 __ovld __cnfn smoothstep(half edge0, half edge1, half8 x);
+half16 __ovld __cnfn smoothstep(half edge0, half edge1, half16 x);
+#endif //cl_khr_fp16
+
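+/*
+ * Worked example of the Hermite interpolation above (illustrative):
+ * smoothstep(0.0f, 1.0f, 0.25f) computes t = 0.25f and returns
+ * t * t * (3 - 2 * t) == 0.15625f.
+ */
+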
+/**
+ * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
+ * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
+ */
+float __ovld __cnfn sign(float x);
+float2 __ovld __cnfn sign(float2 x);
+float3 __ovld __cnfn sign(float3 x);
+float4 __ovld __cnfn sign(float4 x);
+float8 __ovld __cnfn sign(float8 x);
+float16 __ovld __cnfn sign(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sign(double x);
+double2 __ovld __cnfn sign(double2 x);
+double3 __ovld __cnfn sign(double3 x);
+double4 __ovld __cnfn sign(double4 x);
+double8 __ovld __cnfn sign(double8 x);
+double16 __ovld __cnfn sign(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sign(half x);
+half2 __ovld __cnfn sign(half2 x);
+half3 __ovld __cnfn sign(half3 x);
+half4 __ovld __cnfn sign(half4 x);
+half8 __ovld __cnfn sign(half8 x);
+half16 __ovld __cnfn sign(half16 x);
+#endif //cl_khr_fp16
+
+// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
+
+/**
+ * Returns the cross product of p0.xyz and p1.xyz. The
+ * w component of the float4 result returned will be 0.0.
+ */
+float4 __ovld __cnfn cross(float4 p0, float4 p1);
+float3 __ovld __cnfn cross(float3 p0, float3 p1);
+#ifdef cl_khr_fp64
+double4 __ovld __cnfn cross(double4 p0, double4 p1);
+double3 __ovld __cnfn cross(double3 p0, double3 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half4 __ovld __cnfn cross(half4 p0, half4 p1);
+half3 __ovld __cnfn cross(half3 p0, half3 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Compute dot product.
+ */
+float __ovld __cnfn dot(float p0, float p1);
+float __ovld __cnfn dot(float2 p0, float2 p1);
+float __ovld __cnfn dot(float3 p0, float3 p1);
+float __ovld __cnfn dot(float4 p0, float4 p1);
+#ifdef cl_khr_fp64
+double __ovld __cnfn dot(double p0, double p1);
+double __ovld __cnfn dot(double2 p0, double2 p1);
+double __ovld __cnfn dot(double3 p0, double3 p1);
+double __ovld __cnfn dot(double4 p0, double4 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn dot(half p0, half p1);
+half __ovld __cnfn dot(half2 p0, half2 p1);
+half __ovld __cnfn dot(half3 p0, half3 p1);
+half __ovld __cnfn dot(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the distance between p0 and p1. This is
+ * calculated as length(p0 - p1).
+ */
+float __ovld __cnfn distance(float p0, float p1);
+float __ovld __cnfn distance(float2 p0, float2 p1);
+float __ovld __cnfn distance(float3 p0, float3 p1);
+float __ovld __cnfn distance(float4 p0, float4 p1);
+#ifdef cl_khr_fp64
+double __ovld __cnfn distance(double p0, double p1);
+double __ovld __cnfn distance(double2 p0, double2 p1);
+double __ovld __cnfn distance(double3 p0, double3 p1);
+double __ovld __cnfn distance(double4 p0, double4 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn distance(half p0, half p1);
+half __ovld __cnfn distance(half2 p0, half2 p1);
+half __ovld __cnfn distance(half3 p0, half3 p1);
+half __ovld __cnfn distance(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the length of vector p, i.e.,
+ * sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn length(float p);
+float __ovld __cnfn length(float2 p);
+float __ovld __cnfn length(float3 p);
+float __ovld __cnfn length(float4 p);
+#ifdef cl_khr_fp64
+double __ovld __cnfn length(double p);
+double __ovld __cnfn length(double2 p);
+double __ovld __cnfn length(double3 p);
+double __ovld __cnfn length(double4 p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn length(half p);
+half __ovld __cnfn length(half2 p);
+half __ovld __cnfn length(half3 p);
+half __ovld __cnfn length(half4 p);
+#endif //cl_khr_fp16
+
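+/*
+ * Worked example (illustrative): length((float3)(1.0f, 2.0f, 2.0f)) == 3.0f,
+ * and distance(p0, p1) above is simply length(p0 - p1).
+ */
+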
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1.
+ */
+float __ovld __cnfn normalize(float p);
+float2 __ovld __cnfn normalize(float2 p);
+float3 __ovld __cnfn normalize(float3 p);
+float4 __ovld __cnfn normalize(float4 p);
+#ifdef cl_khr_fp64
+double __ovld __cnfn normalize(double p);
+double2 __ovld __cnfn normalize(double2 p);
+double3 __ovld __cnfn normalize(double3 p);
+double4 __ovld __cnfn normalize(double4 p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn normalize(half p);
+half2 __ovld __cnfn normalize(half2 p);
+half3 __ovld __cnfn normalize(half3 p);
+half4 __ovld __cnfn normalize(half4 p);
+#endif //cl_khr_fp16
+
+/**
+ * Returns fast_length(p0 - p1).
+ */
+float __ovld __cnfn fast_distance(float p0, float p1);
+float __ovld __cnfn fast_distance(float2 p0, float2 p1);
+float __ovld __cnfn fast_distance(float3 p0, float3 p1);
+float __ovld __cnfn fast_distance(float4 p0, float4 p1);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_distance(half p0, half p1);
+half __ovld __cnfn fast_distance(half2 p0, half2 p1);
+half __ovld __cnfn fast_distance(half3 p0, half3 p1);
+half __ovld __cnfn fast_distance(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the length of vector p computed as:
+ * half_sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn fast_length(float p);
+float __ovld __cnfn fast_length(float2 p);
+float __ovld __cnfn fast_length(float3 p);
+float __ovld __cnfn fast_length(float4 p);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_length(half p);
+half __ovld __cnfn fast_length(half2 p);
+half __ovld __cnfn fast_length(half3 p);
+half __ovld __cnfn fast_length(half4 p);
+#endif //cl_khr_fp16
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1. fast_normalize is computed as:
+ * p * half_rsqrt (p.x^2 + p.y^2 + ... )
+ * The result shall be within 8192 ulps of the
+ * infinitely precise result of
+ * if (all(p == 0.0f))
+ * result = p;
+ * else
+ * result = p / sqrt (p.x^2 + p.y^2 + ...);
+ * with the following exceptions:
+ * 1) If the sum of squares is greater than FLT_MAX
+ * then the values of the floating-point elements in the
+ * result vector are undefined.
+ * 2) If the sum of squares is less than FLT_MIN then
+ * the implementation may return back p.
+ * 3) If the device is in "denorms are flushed to zero"
+ * mode, individual operand elements with magnitude
+ * less than sqrt(FLT_MIN) may be flushed to zero
+ * before proceeding with the calculation.
+ */
+float __ovld __cnfn fast_normalize(float p);
+float2 __ovld __cnfn fast_normalize(float2 p);
+float3 __ovld __cnfn fast_normalize(float3 p);
+float4 __ovld __cnfn fast_normalize(float4 p);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_normalize(half p);
+half2 __ovld __cnfn fast_normalize(half2 p);
+half3 __ovld __cnfn fast_normalize(half3 p);
+half4 __ovld __cnfn fast_normalize(half4 p);
+#endif //cl_khr_fp16
+
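+/*
+ * Usage sketch (illustrative): fast_normalize((float2)(3.0f, 4.0f)) is
+ * approximately (float2)(0.6f, 0.8f); unlike normalize, the result is only
+ * guaranteed to within the 8192 ulp bound described above.
+ */
+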
+// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
+
+/**
+ * intn isequal (floatn x, floatn y)
+ * Returns the component-wise compare of x == y.
+ */
+int __ovld __cnfn isequal(float x, float y);
+int2 __ovld __cnfn isequal(float2 x, float2 y);
+int3 __ovld __cnfn isequal(float3 x, float3 y);
+int4 __ovld __cnfn isequal(float4 x, float4 y);
+int8 __ovld __cnfn isequal(float8 x, float8 y);
+int16 __ovld __cnfn isequal(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isequal(double x, double y);
+long2 __ovld __cnfn isequal(double2 x, double2 y);
+long3 __ovld __cnfn isequal(double3 x, double3 y);
+long4 __ovld __cnfn isequal(double4 x, double4 y);
+long8 __ovld __cnfn isequal(double8 x, double8 y);
+long16 __ovld __cnfn isequal(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isequal(half x, half y);
+short2 __ovld __cnfn isequal(half2 x, half2 y);
+short3 __ovld __cnfn isequal(half3 x, half3 y);
+short4 __ovld __cnfn isequal(half4 x, half4 y);
+short8 __ovld __cnfn isequal(half8 x, half8 y);
+short16 __ovld __cnfn isequal(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x != y.
+ */
+int __ovld __cnfn isnotequal(float x, float y);
+int2 __ovld __cnfn isnotequal(float2 x, float2 y);
+int3 __ovld __cnfn isnotequal(float3 x, float3 y);
+int4 __ovld __cnfn isnotequal(float4 x, float4 y);
+int8 __ovld __cnfn isnotequal(float8 x, float8 y);
+int16 __ovld __cnfn isnotequal(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnotequal(double x, double y);
+long2 __ovld __cnfn isnotequal(double2 x, double2 y);
+long3 __ovld __cnfn isnotequal(double3 x, double3 y);
+long4 __ovld __cnfn isnotequal(double4 x, double4 y);
+long8 __ovld __cnfn isnotequal(double8 x, double8 y);
+long16 __ovld __cnfn isnotequal(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnotequal(half x, half y);
+short2 __ovld __cnfn isnotequal(half2 x, half2 y);
+short3 __ovld __cnfn isnotequal(half3 x, half3 y);
+short4 __ovld __cnfn isnotequal(half4 x, half4 y);
+short8 __ovld __cnfn isnotequal(half8 x, half8 y);
+short16 __ovld __cnfn isnotequal(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x > y.
+ */
+int __ovld __cnfn isgreater(float x, float y);
+int2 __ovld __cnfn isgreater(float2 x, float2 y);
+int3 __ovld __cnfn isgreater(float3 x, float3 y);
+int4 __ovld __cnfn isgreater(float4 x, float4 y);
+int8 __ovld __cnfn isgreater(float8 x, float8 y);
+int16 __ovld __cnfn isgreater(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isgreater(double x, double y);
+long2 __ovld __cnfn isgreater(double2 x, double2 y);
+long3 __ovld __cnfn isgreater(double3 x, double3 y);
+long4 __ovld __cnfn isgreater(double4 x, double4 y);
+long8 __ovld __cnfn isgreater(double8 x, double8 y);
+long16 __ovld __cnfn isgreater(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isgreater(half x, half y);
+short2 __ovld __cnfn isgreater(half2 x, half2 y);
+short3 __ovld __cnfn isgreater(half3 x, half3 y);
+short4 __ovld __cnfn isgreater(half4 x, half4 y);
+short8 __ovld __cnfn isgreater(half8 x, half8 y);
+short16 __ovld __cnfn isgreater(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x >= y.
+ */
+int __ovld __cnfn isgreaterequal(float x, float y);
+int2 __ovld __cnfn isgreaterequal(float2 x, float2 y);
+int3 __ovld __cnfn isgreaterequal(float3 x, float3 y);
+int4 __ovld __cnfn isgreaterequal(float4 x, float4 y);
+int8 __ovld __cnfn isgreaterequal(float8 x, float8 y);
+int16 __ovld __cnfn isgreaterequal(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isgreaterequal(double x, double y);
+long2 __ovld __cnfn isgreaterequal(double2 x, double2 y);
+long3 __ovld __cnfn isgreaterequal(double3 x, double3 y);
+long4 __ovld __cnfn isgreaterequal(double4 x, double4 y);
+long8 __ovld __cnfn isgreaterequal(double8 x, double8 y);
+long16 __ovld __cnfn isgreaterequal(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isgreaterequal(half x, half y);
+short2 __ovld __cnfn isgreaterequal(half2 x, half2 y);
+short3 __ovld __cnfn isgreaterequal(half3 x, half3 y);
+short4 __ovld __cnfn isgreaterequal(half4 x, half4 y);
+short8 __ovld __cnfn isgreaterequal(half8 x, half8 y);
+short16 __ovld __cnfn isgreaterequal(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x < y.
+ */
+int __ovld __cnfn isless(float x, float y);
+int2 __ovld __cnfn isless(float2 x, float2 y);
+int3 __ovld __cnfn isless(float3 x, float3 y);
+int4 __ovld __cnfn isless(float4 x, float4 y);
+int8 __ovld __cnfn isless(float8 x, float8 y);
+int16 __ovld __cnfn isless(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isless(double x, double y);
+long2 __ovld __cnfn isless(double2 x, double2 y);
+long3 __ovld __cnfn isless(double3 x, double3 y);
+long4 __ovld __cnfn isless(double4 x, double4 y);
+long8 __ovld __cnfn isless(double8 x, double8 y);
+long16 __ovld __cnfn isless(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isless(half x, half y);
+short2 __ovld __cnfn isless(half2 x, half2 y);
+short3 __ovld __cnfn isless(half3 x, half3 y);
+short4 __ovld __cnfn isless(half4 x, half4 y);
+short8 __ovld __cnfn isless(half8 x, half8 y);
+short16 __ovld __cnfn isless(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x <= y.
+ */
+int __ovld __cnfn islessequal(float x, float y);
+int2 __ovld __cnfn islessequal(float2 x, float2 y);
+int3 __ovld __cnfn islessequal(float3 x, float3 y);
+int4 __ovld __cnfn islessequal(float4 x, float4 y);
+int8 __ovld __cnfn islessequal(float8 x, float8 y);
+int16 __ovld __cnfn islessequal(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn islessequal(double x, double y);
+long2 __ovld __cnfn islessequal(double2 x, double2 y);
+long3 __ovld __cnfn islessequal(double3 x, double3 y);
+long4 __ovld __cnfn islessequal(double4 x, double4 y);
+long8 __ovld __cnfn islessequal(double8 x, double8 y);
+long16 __ovld __cnfn islessequal(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn islessequal(half x, half y);
+short2 __ovld __cnfn islessequal(half2 x, half2 y);
+short3 __ovld __cnfn islessequal(half3 x, half3 y);
+short4 __ovld __cnfn islessequal(half4 x, half4 y);
+short8 __ovld __cnfn islessequal(half8 x, half8 y);
+short16 __ovld __cnfn islessequal(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of
+ * (x < y) || (x > y) .
+ */
+int __ovld __cnfn islessgreater(float x, float y);
+int2 __ovld __cnfn islessgreater(float2 x, float2 y);
+int3 __ovld __cnfn islessgreater(float3 x, float3 y);
+int4 __ovld __cnfn islessgreater(float4 x, float4 y);
+int8 __ovld __cnfn islessgreater(float8 x, float8 y);
+int16 __ovld __cnfn islessgreater(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn islessgreater(double x, double y);
+long2 __ovld __cnfn islessgreater(double2 x, double2 y);
+long3 __ovld __cnfn islessgreater(double3 x, double3 y);
+long4 __ovld __cnfn islessgreater(double4 x, double4 y);
+long8 __ovld __cnfn islessgreater(double8 x, double8 y);
+long16 __ovld __cnfn islessgreater(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn islessgreater(half x, half y);
+short2 __ovld __cnfn islessgreater(half2 x, half2 y);
+short3 __ovld __cnfn islessgreater(half3 x, half3 y);
+short4 __ovld __cnfn islessgreater(half4 x, half4 y);
+short8 __ovld __cnfn islessgreater(half8 x, half8 y);
+short16 __ovld __cnfn islessgreater(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Test for finite value.
+ */
+int __ovld __cnfn isfinite(float);
+int2 __ovld __cnfn isfinite(float2);
+int3 __ovld __cnfn isfinite(float3);
+int4 __ovld __cnfn isfinite(float4);
+int8 __ovld __cnfn isfinite(float8);
+int16 __ovld __cnfn isfinite(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isfinite(double);
+long2 __ovld __cnfn isfinite(double2);
+long3 __ovld __cnfn isfinite(double3);
+long4 __ovld __cnfn isfinite(double4);
+long8 __ovld __cnfn isfinite(double8);
+long16 __ovld __cnfn isfinite(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isfinite(half);
+short2 __ovld __cnfn isfinite(half2);
+short3 __ovld __cnfn isfinite(half3);
+short4 __ovld __cnfn isfinite(half4);
+short8 __ovld __cnfn isfinite(half8);
+short16 __ovld __cnfn isfinite(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for infinity value (positive or negative).
+ */
+int __ovld __cnfn isinf(float);
+int2 __ovld __cnfn isinf(float2);
+int3 __ovld __cnfn isinf(float3);
+int4 __ovld __cnfn isinf(float4);
+int8 __ovld __cnfn isinf(float8);
+int16 __ovld __cnfn isinf(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isinf(double);
+long2 __ovld __cnfn isinf(double2);
+long3 __ovld __cnfn isinf(double3);
+long4 __ovld __cnfn isinf(double4);
+long8 __ovld __cnfn isinf(double8);
+long16 __ovld __cnfn isinf(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isinf(half);
+short2 __ovld __cnfn isinf(half2);
+short3 __ovld __cnfn isinf(half3);
+short4 __ovld __cnfn isinf(half4);
+short8 __ovld __cnfn isinf(half8);
+short16 __ovld __cnfn isinf(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for a NaN.
+ */
+int __ovld __cnfn isnan(float);
+int2 __ovld __cnfn isnan(float2);
+int3 __ovld __cnfn isnan(float3);
+int4 __ovld __cnfn isnan(float4);
+int8 __ovld __cnfn isnan(float8);
+int16 __ovld __cnfn isnan(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnan(double);
+long2 __ovld __cnfn isnan(double2);
+long3 __ovld __cnfn isnan(double3);
+long4 __ovld __cnfn isnan(double4);
+long8 __ovld __cnfn isnan(double8);
+long16 __ovld __cnfn isnan(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnan(half);
+short2 __ovld __cnfn isnan(half2);
+short3 __ovld __cnfn isnan(half3);
+short4 __ovld __cnfn isnan(half4);
+short8 __ovld __cnfn isnan(half8);
+short16 __ovld __cnfn isnan(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for a normal value.
+ */
+int __ovld __cnfn isnormal(float);
+int2 __ovld __cnfn isnormal(float2);
+int3 __ovld __cnfn isnormal(float3);
+int4 __ovld __cnfn isnormal(float4);
+int8 __ovld __cnfn isnormal(float8);
+int16 __ovld __cnfn isnormal(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnormal(double);
+long2 __ovld __cnfn isnormal(double2);
+long3 __ovld __cnfn isnormal(double3);
+long4 __ovld __cnfn isnormal(double4);
+long8 __ovld __cnfn isnormal(double8);
+long16 __ovld __cnfn isnormal(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnormal(half);
+short2 __ovld __cnfn isnormal(half2);
+short3 __ovld __cnfn isnormal(half3);
+short4 __ovld __cnfn isnormal(half4);
+short8 __ovld __cnfn isnormal(half8);
+short16 __ovld __cnfn isnormal(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test if arguments are ordered. isordered() takes
+ * arguments x and y, and returns the result
+ * isequal(x, x) && isequal(y, y).
+ */
+int __ovld __cnfn isordered(float x, float y);
+int2 __ovld __cnfn isordered(float2 x, float2 y);
+int3 __ovld __cnfn isordered(float3 x, float3 y);
+int4 __ovld __cnfn isordered(float4 x, float4 y);
+int8 __ovld __cnfn isordered(float8 x, float8 y);
+int16 __ovld __cnfn isordered(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isordered(double x, double y);
+long2 __ovld __cnfn isordered(double2 x, double2 y);
+long3 __ovld __cnfn isordered(double3 x, double3 y);
+long4 __ovld __cnfn isordered(double4 x, double4 y);
+long8 __ovld __cnfn isordered(double8 x, double8 y);
+long16 __ovld __cnfn isordered(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isordered(half x, half y);
+short2 __ovld __cnfn isordered(half2 x, half2 y);
+short3 __ovld __cnfn isordered(half3 x, half3 y);
+short4 __ovld __cnfn isordered(half4 x, half4 y);
+short8 __ovld __cnfn isordered(half8 x, half8 y);
+short16 __ovld __cnfn isordered(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Test if arguments are unordered. isunordered()
+ * takes arguments x and y, returning non-zero if x or y
+ * is NaN, and zero otherwise.
+ */
+int __ovld __cnfn isunordered(float x, float y);
+int2 __ovld __cnfn isunordered(float2 x, float2 y);
+int3 __ovld __cnfn isunordered(float3 x, float3 y);
+int4 __ovld __cnfn isunordered(float4 x, float4 y);
+int8 __ovld __cnfn isunordered(float8 x, float8 y);
+int16 __ovld __cnfn isunordered(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isunordered(double x, double y);
+long2 __ovld __cnfn isunordered(double2 x, double2 y);
+long3 __ovld __cnfn isunordered(double3 x, double3 y);
+long4 __ovld __cnfn isunordered(double4 x, double4 y);
+long8 __ovld __cnfn isunordered(double8 x, double8 y);
+long16 __ovld __cnfn isunordered(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isunordered(half x, half y);
+short2 __ovld __cnfn isunordered(half2 x, half2 y);
+short3 __ovld __cnfn isunordered(half3 x, half3 y);
+short4 __ovld __cnfn isunordered(half4 x, half4 y);
+short8 __ovld __cnfn isunordered(half8 x, half8 y);
+short16 __ovld __cnfn isunordered(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Test for sign bit. The scalar version of the function
+ * returns 1 if the sign bit in the float is set, and 0
+ * otherwise. The vector version returns, for each
+ * component in floatn, -1 if the sign bit in the float is
+ * set and 0 otherwise.
+ */
+int __ovld __cnfn signbit(float);
+int2 __ovld __cnfn signbit(float2);
+int3 __ovld __cnfn signbit(float3);
+int4 __ovld __cnfn signbit(float4);
+int8 __ovld __cnfn signbit(float8);
+int16 __ovld __cnfn signbit(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn signbit(double);
+long2 __ovld __cnfn signbit(double2);
+long3 __ovld __cnfn signbit(double3);
+long4 __ovld __cnfn signbit(double4);
+long8 __ovld __cnfn signbit(double8);
+long16 __ovld __cnfn signbit(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn signbit(half);
+short2 __ovld __cnfn signbit(half2);
+short3 __ovld __cnfn signbit(half3);
+short4 __ovld __cnfn signbit(half4);
+short8 __ovld __cnfn signbit(half8);
+short16 __ovld __cnfn signbit(half16);
+#endif //cl_khr_fp16
+
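+/*
+ * Worked examples (illustrative): signbit(-0.0f) == 1, while the vector form
+ * signbit((float2)(-1.0f, 1.0f)) == (int2)(-1, 0).
+ */
+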
+/**
+ * Returns 1 if the most significant bit in any component
+ * of x is set; otherwise returns 0.
+ */
+int __ovld __cnfn any(char x);
+int __ovld __cnfn any(char2 x);
+int __ovld __cnfn any(char3 x);
+int __ovld __cnfn any(char4 x);
+int __ovld __cnfn any(char8 x);
+int __ovld __cnfn any(char16 x);
+int __ovld __cnfn any(short x);
+int __ovld __cnfn any(short2 x);
+int __ovld __cnfn any(short3 x);
+int __ovld __cnfn any(short4 x);
+int __ovld __cnfn any(short8 x);
+int __ovld __cnfn any(short16 x);
+int __ovld __cnfn any(int x);
+int __ovld __cnfn any(int2 x);
+int __ovld __cnfn any(int3 x);
+int __ovld __cnfn any(int4 x);
+int __ovld __cnfn any(int8 x);
+int __ovld __cnfn any(int16 x);
+int __ovld __cnfn any(long x);
+int __ovld __cnfn any(long2 x);
+int __ovld __cnfn any(long3 x);
+int __ovld __cnfn any(long4 x);
+int __ovld __cnfn any(long8 x);
+int __ovld __cnfn any(long16 x);
+
+/**
+ * Returns 1 if the most significant bit in all components
+ * of x is set; otherwise returns 0.
+ */
+int __ovld __cnfn all(char x);
+int __ovld __cnfn all(char2 x);
+int __ovld __cnfn all(char3 x);
+int __ovld __cnfn all(char4 x);
+int __ovld __cnfn all(char8 x);
+int __ovld __cnfn all(char16 x);
+int __ovld __cnfn all(short x);
+int __ovld __cnfn all(short2 x);
+int __ovld __cnfn all(short3 x);
+int __ovld __cnfn all(short4 x);
+int __ovld __cnfn all(short8 x);
+int __ovld __cnfn all(short16 x);
+int __ovld __cnfn all(int x);
+int __ovld __cnfn all(int2 x);
+int __ovld __cnfn all(int3 x);
+int __ovld __cnfn all(int4 x);
+int __ovld __cnfn all(int8 x);
+int __ovld __cnfn all(int16 x);
+int __ovld __cnfn all(long x);
+int __ovld __cnfn all(long2 x);
+int __ovld __cnfn all(long3 x);
+int __ovld __cnfn all(long4 x);
+int __ovld __cnfn all(long8 x);
+int __ovld __cnfn all(long16 x);
+
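+/*
+ * Worked examples (illustrative): any((int2)(0, -1)) == 1 because the second
+ * component has its most significant bit set, while all((int2)(0, -1)) == 0.
+ */
+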
+/**
+ * Each bit of the result is the corresponding bit of a if
+ * the corresponding bit of c is 0. Otherwise it is the
+ * corresponding bit of b.
+ */
+char __ovld __cnfn bitselect(char a, char b, char c);
+uchar __ovld __cnfn bitselect(uchar a, uchar b, uchar c);
+char2 __ovld __cnfn bitselect(char2 a, char2 b, char2 c);
+uchar2 __ovld __cnfn bitselect(uchar2 a, uchar2 b, uchar2 c);
+char3 __ovld __cnfn bitselect(char3 a, char3 b, char3 c);
+uchar3 __ovld __cnfn bitselect(uchar3 a, uchar3 b, uchar3 c);
+char4 __ovld __cnfn bitselect(char4 a, char4 b, char4 c);
+uchar4 __ovld __cnfn bitselect(uchar4 a, uchar4 b, uchar4 c);
+char8 __ovld __cnfn bitselect(char8 a, char8 b, char8 c);
+uchar8 __ovld __cnfn bitselect(uchar8 a, uchar8 b, uchar8 c);
+char16 __ovld __cnfn bitselect(char16 a, char16 b, char16 c);
+uchar16 __ovld __cnfn bitselect(uchar16 a, uchar16 b, uchar16 c);
+short __ovld __cnfn bitselect(short a, short b, short c);
+ushort __ovld __cnfn bitselect(ushort a, ushort b, ushort c);
+short2 __ovld __cnfn bitselect(short2 a, short2 b, short2 c);
+ushort2 __ovld __cnfn bitselect(ushort2 a, ushort2 b, ushort2 c);
+short3 __ovld __cnfn bitselect(short3 a, short3 b, short3 c);
+ushort3 __ovld __cnfn bitselect(ushort3 a, ushort3 b, ushort3 c);
+short4 __ovld __cnfn bitselect(short4 a, short4 b, short4 c);
+ushort4 __ovld __cnfn bitselect(ushort4 a, ushort4 b, ushort4 c);
+short8 __ovld __cnfn bitselect(short8 a, short8 b, short8 c);
+ushort8 __ovld __cnfn bitselect(ushort8 a, ushort8 b, ushort8 c);
+short16 __ovld __cnfn bitselect(short16 a, short16 b, short16 c);
+ushort16 __ovld __cnfn bitselect(ushort16 a, ushort16 b, ushort16 c);
+int __ovld __cnfn bitselect(int a, int b, int c);
+uint __ovld __cnfn bitselect(uint a, uint b, uint c);
+int2 __ovld __cnfn bitselect(int2 a, int2 b, int2 c);
+uint2 __ovld __cnfn bitselect(uint2 a, uint2 b, uint2 c);
+int3 __ovld __cnfn bitselect(int3 a, int3 b, int3 c);
+uint3 __ovld __cnfn bitselect(uint3 a, uint3 b, uint3 c);
+int4 __ovld __cnfn bitselect(int4 a, int4 b, int4 c);
+uint4 __ovld __cnfn bitselect(uint4 a, uint4 b, uint4 c);
+int8 __ovld __cnfn bitselect(int8 a, int8 b, int8 c);
+uint8 __ovld __cnfn bitselect(uint8 a, uint8 b, uint8 c);
+int16 __ovld __cnfn bitselect(int16 a, int16 b, int16 c);
+uint16 __ovld __cnfn bitselect(uint16 a, uint16 b, uint16 c);
+long __ovld __cnfn bitselect(long a, long b, long c);
+ulong __ovld __cnfn bitselect(ulong a, ulong b, ulong c);
+long2 __ovld __cnfn bitselect(long2 a, long2 b, long2 c);
+ulong2 __ovld __cnfn bitselect(ulong2 a, ulong2 b, ulong2 c);
+long3 __ovld __cnfn bitselect(long3 a, long3 b, long3 c);
+ulong3 __ovld __cnfn bitselect(ulong3 a, ulong3 b, ulong3 c);
+long4 __ovld __cnfn bitselect(long4 a, long4 b, long4 c);
+ulong4 __ovld __cnfn bitselect(ulong4 a, ulong4 b, ulong4 c);
+long8 __ovld __cnfn bitselect(long8 a, long8 b, long8 c);
+ulong8 __ovld __cnfn bitselect(ulong8 a, ulong8 b, ulong8 c);
+long16 __ovld __cnfn bitselect(long16 a, long16 b, long16 c);
+ulong16 __ovld __cnfn bitselect(ulong16 a, ulong16 b, ulong16 c);
+float __ovld __cnfn bitselect(float a, float b, float c);
+float2 __ovld __cnfn bitselect(float2 a, float2 b, float2 c);
+float3 __ovld __cnfn bitselect(float3 a, float3 b, float3 c);
+float4 __ovld __cnfn bitselect(float4 a, float4 b, float4 c);
+float8 __ovld __cnfn bitselect(float8 a, float8 b, float8 c);
+float16 __ovld __cnfn bitselect(float16 a, float16 b, float16 c);
+#ifdef cl_khr_fp64
+double __ovld __cnfn bitselect(double a, double b, double c);
+double2 __ovld __cnfn bitselect(double2 a, double2 b, double2 c);
+double3 __ovld __cnfn bitselect(double3 a, double3 b, double3 c);
+double4 __ovld __cnfn bitselect(double4 a, double4 b, double4 c);
+double8 __ovld __cnfn bitselect(double8 a, double8 b, double8 c);
+double16 __ovld __cnfn bitselect(double16 a, double16 b, double16 c);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn bitselect(half a, half b, half c);
+half2 __ovld __cnfn bitselect(half2 a, half2 b, half2 c);
+half3 __ovld __cnfn bitselect(half3 a, half3 b, half3 c);
+half4 __ovld __cnfn bitselect(half4 a, half4 b, half4 c);
+half8 __ovld __cnfn bitselect(half8 a, half8 b, half8 c);
+half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c);
+#endif //cl_khr_fp16
+
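+/*
+ * Worked example of the bitwise selection above (illustrative):
+ * bitselect((uchar)0xF0, (uchar)0x0F, (uchar)0x33) == (uchar)0xC3, i.e.
+ * (a & ~c) | (b & c).
+ */
+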
+/**
+ * For each component of a vector type,
+ * result[i] = (MSB of c[i] is set) ? b[i] : a[i].
+ * For a scalar type, result = c ? b : a.
+ */
+char __ovld __cnfn select(char a, char b, char c);
+uchar __ovld __cnfn select(uchar a, uchar b, char c);
+char2 __ovld __cnfn select(char2 a, char2 b, char2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, char2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, char3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, char3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, char4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, char4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, char8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, char8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, char16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, char16 c);
+short __ovld __cnfn select(short a, short b, char c);
+ushort __ovld __cnfn select(ushort a, ushort b, char c);
+short2 __ovld __cnfn select(short2 a, short2 b, char2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, char2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, char3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, char3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, char4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, char4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, char8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, char8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, char16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, char16 c);
+int __ovld __cnfn select(int a, int b, char c);
+uint __ovld __cnfn select(uint a, uint b, char c);
+int2 __ovld __cnfn select(int2 a, int2 b, char2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, char2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, char3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, char3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, char4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, char4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, char8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, char8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, char16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, char16 c);
+long __ovld __cnfn select(long a, long b, char c);
+ulong __ovld __cnfn select(ulong a, ulong b, char c);
+long2 __ovld __cnfn select(long2 a, long2 b, char2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, char2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, char3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, char3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, char4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, char4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, char8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, char8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, char16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, char16 c);
+float __ovld __cnfn select(float a, float b, char c);
+float2 __ovld __cnfn select(float2 a, float2 b, char2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, char3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, char4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, char8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, char16 c);
+char __ovld __cnfn select(char a, char b, short c);
+uchar __ovld __cnfn select(uchar a, uchar b, short c);
+char2 __ovld __cnfn select(char2 a, char2 b, short2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, short2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, short3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, short3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, short4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, short4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, short8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, short8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, short16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, short16 c);
+short __ovld __cnfn select(short a, short b, short c);
+ushort __ovld __cnfn select(ushort a, ushort b, short c);
+short2 __ovld __cnfn select(short2 a, short2 b, short2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, short2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, short3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, short3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, short4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, short4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, short8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, short8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, short16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, short16 c);
+int __ovld __cnfn select(int a, int b, short c);
+uint __ovld __cnfn select(uint a, uint b, short c);
+int2 __ovld __cnfn select(int2 a, int2 b, short2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, short2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, short3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, short3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, short4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, short4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, short8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, short8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, short16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, short16 c);
+long __ovld __cnfn select(long a, long b, short c);
+ulong __ovld __cnfn select(ulong a, ulong b, short c);
+long2 __ovld __cnfn select(long2 a, long2 b, short2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, short2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, short3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, short3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, short4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, short4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, short8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, short8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, short16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, short16 c);
+float __ovld __cnfn select(float a, float b, short c);
+float2 __ovld __cnfn select(float2 a, float2 b, short2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, short3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, short4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, short8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, short16 c);
+char __ovld __cnfn select(char a, char b, int c);
+uchar __ovld __cnfn select(uchar a, uchar b, int c);
+char2 __ovld __cnfn select(char2 a, char2 b, int2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, int2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, int3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, int3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, int4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, int4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, int8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, int8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, int16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, int16 c);
+short __ovld __cnfn select(short a, short b, int c);
+ushort __ovld __cnfn select(ushort a, ushort b, int c);
+short2 __ovld __cnfn select(short2 a, short2 b, int2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, int2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, int3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, int3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, int4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, int4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, int8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, int8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, int16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, int16 c);
+int __ovld __cnfn select(int a, int b, int c);
+uint __ovld __cnfn select(uint a, uint b, int c);
+int2 __ovld __cnfn select(int2 a, int2 b, int2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, int2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, int3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, int3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, int4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, int4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, int8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, int8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, int16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, int16 c);
+long __ovld __cnfn select(long a, long b, int c);
+ulong __ovld __cnfn select(ulong a, ulong b, int c);
+long2 __ovld __cnfn select(long2 a, long2 b, int2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, int2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, int3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, int3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, int4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, int4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, int8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, int8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, int16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, int16 c);
+float __ovld __cnfn select(float a, float b, int c);
+float2 __ovld __cnfn select(float2 a, float2 b, int2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, int3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, int4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, int8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, int16 c);
+char __ovld __cnfn select(char a, char b, long c);
+uchar __ovld __cnfn select(uchar a, uchar b, long c);
+char2 __ovld __cnfn select(char2 a, char2 b, long2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, long2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, long3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, long3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, long4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, long4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, long8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, long8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, long16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, long16 c);
+short __ovld __cnfn select(short a, short b, long c);
+ushort __ovld __cnfn select(ushort a, ushort b, long c);
+short2 __ovld __cnfn select(short2 a, short2 b, long2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, long2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, long3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, long3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, long4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, long4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, long8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, long8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, long16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, long16 c);
+int __ovld __cnfn select(int a, int b, long c);
+uint __ovld __cnfn select(uint a, uint b, long c);
+int2 __ovld __cnfn select(int2 a, int2 b, long2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, long2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, long3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, long3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, long4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, long4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, long8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, long8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, long16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, long16 c);
+long __ovld __cnfn select(long a, long b, long c);
+ulong __ovld __cnfn select(ulong a, ulong b, long c);
+long2 __ovld __cnfn select(long2 a, long2 b, long2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, long2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, long3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, long3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, long4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, long4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, long8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, long8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, long16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, long16 c);
+float __ovld __cnfn select(float a, float b, long c);
+float2 __ovld __cnfn select(float2 a, float2 b, long2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, long3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, long4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, long8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, long16 c);
+char __ovld __cnfn select(char a, char b, uchar c);
+uchar __ovld __cnfn select(uchar a, uchar b, uchar c);
+char2 __ovld __cnfn select(char2 a, char2 b, uchar2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uchar2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, uchar3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uchar3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, uchar4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uchar4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, uchar8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uchar8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, uchar16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uchar16 c);
+short __ovld __cnfn select(short a, short b, uchar c);
+ushort __ovld __cnfn select(ushort a, ushort b, uchar c);
+short2 __ovld __cnfn select(short2 a, short2 b, uchar2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, uchar2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, uchar3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, uchar3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, uchar4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, uchar4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, uchar8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, uchar8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, uchar16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, uchar16 c);
+int __ovld __cnfn select(int a, int b, uchar c);
+uint __ovld __cnfn select(uint a, uint b, uchar c);
+int2 __ovld __cnfn select(int2 a, int2 b, uchar2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, uchar2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, uchar3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, uchar3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, uchar4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, uchar4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, uchar8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, uchar8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, uchar16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, uchar16 c);
+long __ovld __cnfn select(long a, long b, uchar c);
+ulong __ovld __cnfn select(ulong a, ulong b, uchar c);
+long2 __ovld __cnfn select(long2 a, long2 b, uchar2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, uchar2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, uchar3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, uchar3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, uchar4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, uchar4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, uchar8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, uchar8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, uchar16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, uchar16 c);
+float __ovld __cnfn select(float a, float b, uchar c);
+float2 __ovld __cnfn select(float2 a, float2 b, uchar2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, uchar3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, uchar4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, uchar8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, uchar16 c);
+char __ovld __cnfn select(char a, char b, ushort c);
+uchar __ovld __cnfn select(uchar a, uchar b, ushort c);
+char2 __ovld __cnfn select(char2 a, char2 b, ushort2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, ushort2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, ushort3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, ushort3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, ushort4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, ushort4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, ushort8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, ushort8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, ushort16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, ushort16 c);
+short __ovld __cnfn select(short a, short b, ushort c);
+ushort __ovld __cnfn select(ushort a, ushort b, ushort c);
+short2 __ovld __cnfn select(short2 a, short2 b, ushort2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ushort2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, ushort3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ushort3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, ushort4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ushort4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, ushort8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ushort8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, ushort16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ushort16 c);
+int __ovld __cnfn select(int a, int b, ushort c);
+uint __ovld __cnfn select(uint a, uint b, ushort c);
+int2 __ovld __cnfn select(int2 a, int2 b, ushort2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, ushort2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, ushort3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, ushort3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, ushort4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, ushort4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, ushort8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, ushort8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, ushort16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, ushort16 c);
+long __ovld __cnfn select(long a, long b, ushort c);
+ulong __ovld __cnfn select(ulong a, ulong b, ushort c);
+long2 __ovld __cnfn select(long2 a, long2 b, ushort2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ushort2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, ushort3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ushort3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, ushort4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ushort4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, ushort8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ushort8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, ushort16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ushort16 c);
+float __ovld __cnfn select(float a, float b, ushort c);
+float2 __ovld __cnfn select(float2 a, float2 b, ushort2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, ushort3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, ushort4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, ushort8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, ushort16 c);
+char __ovld __cnfn select(char a, char b, uint c);
+uchar __ovld __cnfn select(uchar a, uchar b, uint c);
+char2 __ovld __cnfn select(char2 a, char2 b, uint2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uint2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, uint3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uint3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, uint4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uint4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, uint8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uint8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, uint16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uint16 c);
+short __ovld __cnfn select(short a, short b, uint c);
+ushort __ovld __cnfn select(ushort a, ushort b, uint c);
+short2 __ovld __cnfn select(short2 a, short2 b, uint2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, uint2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, uint3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, uint3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, uint4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, uint4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, uint8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, uint8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, uint16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, uint16 c);
+int __ovld __cnfn select(int a, int b, uint c);
+uint __ovld __cnfn select(uint a, uint b, uint c);
+int2 __ovld __cnfn select(int2 a, int2 b, uint2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, uint2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, uint3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, uint3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, uint4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, uint4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, uint8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, uint8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, uint16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, uint16 c);
+long __ovld __cnfn select(long a, long b, uint c);
+ulong __ovld __cnfn select(ulong a, ulong b, uint c);
+long2 __ovld __cnfn select(long2 a, long2 b, uint2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, uint2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, uint3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, uint3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, uint4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, uint4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, uint8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, uint8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, uint16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, uint16 c);
+float __ovld __cnfn select(float a, float b, uint c);
+float2 __ovld __cnfn select(float2 a, float2 b, uint2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, uint3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, uint4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, uint8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, uint16 c);
+char __ovld __cnfn select(char a, char b, ulong c);
+uchar __ovld __cnfn select(uchar a, uchar b, ulong c);
+char2 __ovld __cnfn select(char2 a, char2 b, ulong2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, ulong2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, ulong3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, ulong3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, ulong4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, ulong4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, ulong8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, ulong8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, ulong16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, ulong16 c);
+short __ovld __cnfn select(short a, short b, ulong c);
+ushort __ovld __cnfn select(ushort a, ushort b, ulong c);
+short2 __ovld __cnfn select(short2 a, short2 b, ulong2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ulong2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, ulong3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ulong3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, ulong4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ulong4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, ulong8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ulong8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, ulong16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ulong16 c);
+int __ovld __cnfn select(int a, int b, ulong c);
+uint __ovld __cnfn select(uint a, uint b, ulong c);
+int2 __ovld __cnfn select(int2 a, int2 b, ulong2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, ulong2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, ulong3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, ulong3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, ulong4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, ulong4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, ulong8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, ulong8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, ulong16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, ulong16 c);
+long __ovld __cnfn select(long a, long b, ulong c);
+ulong __ovld __cnfn select(ulong a, ulong b, ulong c);
+long2 __ovld __cnfn select(long2 a, long2 b, ulong2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ulong2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, ulong3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ulong3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, ulong4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ulong4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, ulong8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ulong8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, ulong16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ulong16 c);
+float __ovld __cnfn select(float a, float b, ulong c);
+float2 __ovld __cnfn select(float2 a, float2 b, ulong2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, ulong3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, ulong4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, ulong8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, ulong16 c);
+#ifdef cl_khr_fp64
+double __ovld __cnfn select(double a, double b, long c);
+double2 __ovld __cnfn select(double2 a, double2 b, long2 c);
+double3 __ovld __cnfn select(double3 a, double3 b, long3 c);
+double4 __ovld __cnfn select(double4 a, double4 b, long4 c);
+double8 __ovld __cnfn select(double8 a, double8 b, long8 c);
+double16 __ovld __cnfn select(double16 a, double16 b, long16 c);
+double __ovld __cnfn select(double a, double b, ulong c);
+double2 __ovld __cnfn select(double2 a, double2 b, ulong2 c);
+double3 __ovld __cnfn select(double3 a, double3 b, ulong3 c);
+double4 __ovld __cnfn select(double4 a, double4 b, ulong4 c);
+double8 __ovld __cnfn select(double8 a, double8 b, ulong8 c);
+double16 __ovld __cnfn select(double16 a, double16 b, ulong16 c);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn select(half a, half b, short c);
+half2 __ovld __cnfn select(half2 a, half2 b, short2 c);
+half3 __ovld __cnfn select(half3 a, half3 b, short3 c);
+half4 __ovld __cnfn select(half4 a, half4 b, short4 c);
+half8 __ovld __cnfn select(half8 a, half8 b, short8 c);
+half16 __ovld __cnfn select(half16 a, half16 b, short16 c);
+half __ovld __cnfn select(half a, half b, ushort c);
+half2 __ovld __cnfn select(half2 a, half2 b, ushort2 c);
+half3 __ovld __cnfn select(half3 a, half3 b, ushort3 c);
+half4 __ovld __cnfn select(half4 a, half4 b, ushort4 c);
+half8 __ovld __cnfn select(half8 a, half8 b, ushort8 c);
+half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
+#endif //cl_khr_fp16
+
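+/*
+ * Illustrative usage (editorial sketch, not part of the header's specification
+ * text): for the vector overloads above, each component of the result comes
+ * from b when the most significant bit of the corresponding component of c is
+ * set, and from a otherwise, so a vector comparison can drive a per-lane
+ * select. The helper name below is hypothetical.
+ *
+ *   float4 clamp_negative_to_zero(float4 x) {
+ *     int4 is_neg = x < (float4)(0.0f);          // -1 per lane where true, 0 where false
+ *     return select(x, (float4)(0.0f), is_neg);  // lanes with MSB set take 0.0f
+ *   }
+ */
+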
+// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
+// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type
+/**
+ * Use generic type gentype to indicate the built-in data types
+ * char, uchar, short, ushort, int, uint, long, ulong, float,
+ * double or half.
+ *
+ * vloadn returns sizeof (gentypen) bytes of data read from address (p + (offset * n)).
+ *
+ * vstoren writes sizeof (gentypen) bytes given by data to address (p + (offset * n)).
+ *
+ * The address computed as (p + (offset * n)) must be
+ * 8-bit aligned if gentype is char, uchar;
+ * 16-bit aligned if gentype is short, ushort, half;
+ * 32-bit aligned if gentype is int, uint, float;
+ * 64-bit aligned if gentype is long, ulong, double.
+ */
+
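+/*
+ * Illustrative usage (editorial sketch, not part of the header's specification
+ * text): assuming a kernel that handles four floats per work-item, vload4 and
+ * vstore4 give aligned vector access into plain float buffers. The kernel name
+ * and parameters below are hypothetical.
+ *
+ *   __kernel void scale4(__global const float *in, __global float *out, float k) {
+ *     size_t i = get_global_id(0);
+ *     float4 v = vload4(i, in);   // reads in[4*i] .. in[4*i + 3]
+ *     vstore4(v * k, i, out);     // writes out[4*i] .. out[4*i + 3]
+ *   }
+ */
+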
+char2 __ovld vload2(size_t offset, const __constant char *p);
+uchar2 __ovld vload2(size_t offset, const __constant uchar *p);
+short2 __ovld vload2(size_t offset, const __constant short *p);
+ushort2 __ovld vload2(size_t offset, const __constant ushort *p);
+int2 __ovld vload2(size_t offset, const __constant int *p);
+uint2 __ovld vload2(size_t offset, const __constant uint *p);
+long2 __ovld vload2(size_t offset, const __constant long *p);
+ulong2 __ovld vload2(size_t offset, const __constant ulong *p);
+float2 __ovld vload2(size_t offset, const __constant float *p);
+char3 __ovld vload3(size_t offset, const __constant char *p);
+uchar3 __ovld vload3(size_t offset, const __constant uchar *p);
+short3 __ovld vload3(size_t offset, const __constant short *p);
+ushort3 __ovld vload3(size_t offset, const __constant ushort *p);
+int3 __ovld vload3(size_t offset, const __constant int *p);
+uint3 __ovld vload3(size_t offset, const __constant uint *p);
+long3 __ovld vload3(size_t offset, const __constant long *p);
+ulong3 __ovld vload3(size_t offset, const __constant ulong *p);
+float3 __ovld vload3(size_t offset, const __constant float *p);
+char4 __ovld vload4(size_t offset, const __constant char *p);
+uchar4 __ovld vload4(size_t offset, const __constant uchar *p);
+short4 __ovld vload4(size_t offset, const __constant short *p);
+ushort4 __ovld vload4(size_t offset, const __constant ushort *p);
+int4 __ovld vload4(size_t offset, const __constant int *p);
+uint4 __ovld vload4(size_t offset, const __constant uint *p);
+long4 __ovld vload4(size_t offset, const __constant long *p);
+ulong4 __ovld vload4(size_t offset, const __constant ulong *p);
+float4 __ovld vload4(size_t offset, const __constant float *p);
+char8 __ovld vload8(size_t offset, const __constant char *p);
+uchar8 __ovld vload8(size_t offset, const __constant uchar *p);
+short8 __ovld vload8(size_t offset, const __constant short *p);
+ushort8 __ovld vload8(size_t offset, const __constant ushort *p);
+int8 __ovld vload8(size_t offset, const __constant int *p);
+uint8 __ovld vload8(size_t offset, const __constant uint *p);
+long8 __ovld vload8(size_t offset, const __constant long *p);
+ulong8 __ovld vload8(size_t offset, const __constant ulong *p);
+float8 __ovld vload8(size_t offset, const __constant float *p);
+char16 __ovld vload16(size_t offset, const __constant char *p);
+uchar16 __ovld vload16(size_t offset, const __constant uchar *p);
+short16 __ovld vload16(size_t offset, const __constant short *p);
+ushort16 __ovld vload16(size_t offset, const __constant ushort *p);
+int16 __ovld vload16(size_t offset, const __constant int *p);
+uint16 __ovld vload16(size_t offset, const __constant uint *p);
+long16 __ovld vload16(size_t offset, const __constant long *p);
+ulong16 __ovld vload16(size_t offset, const __constant ulong *p);
+float16 __ovld vload16(size_t offset, const __constant float *p);
+#ifdef cl_khr_fp64
+double2 __ovld vload2(size_t offset, const __constant double *p);
+double3 __ovld vload3(size_t offset, const __constant double *p);
+double4 __ovld vload4(size_t offset, const __constant double *p);
+double8 __ovld vload8(size_t offset, const __constant double *p);
+double16 __ovld vload16(size_t offset, const __constant double *p);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld vload(size_t offset, const __constant half *p);
+half2 __ovld vload2(size_t offset, const __constant half *p);
+half3 __ovld vload3(size_t offset, const __constant half *p);
+half4 __ovld vload4(size_t offset, const __constant half *p);
+half8 __ovld vload8(size_t offset, const __constant half *p);
+half16 __ovld vload16(size_t offset, const __constant half *p);
+#endif //cl_khr_fp16
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+char2 __ovld vload2(size_t offset, const char *p);
+uchar2 __ovld vload2(size_t offset, const uchar *p);
+short2 __ovld vload2(size_t offset, const short *p);
+ushort2 __ovld vload2(size_t offset, const ushort *p);
+int2 __ovld vload2(size_t offset, const int *p);
+uint2 __ovld vload2(size_t offset, const uint *p);
+long2 __ovld vload2(size_t offset, const long *p);
+ulong2 __ovld vload2(size_t offset, const ulong *p);
+float2 __ovld vload2(size_t offset, const float *p);
+char3 __ovld vload3(size_t offset, const char *p);
+uchar3 __ovld vload3(size_t offset, const uchar *p);
+short3 __ovld vload3(size_t offset, const short *p);
+ushort3 __ovld vload3(size_t offset, const ushort *p);
+int3 __ovld vload3(size_t offset, const int *p);
+uint3 __ovld vload3(size_t offset, const uint *p);
+long3 __ovld vload3(size_t offset, const long *p);
+ulong3 __ovld vload3(size_t offset, const ulong *p);
+float3 __ovld vload3(size_t offset, const float *p);
+char4 __ovld vload4(size_t offset, const char *p);
+uchar4 __ovld vload4(size_t offset, const uchar *p);
+short4 __ovld vload4(size_t offset, const short *p);
+ushort4 __ovld vload4(size_t offset, const ushort *p);
+int4 __ovld vload4(size_t offset, const int *p);
+uint4 __ovld vload4(size_t offset, const uint *p);
+long4 __ovld vload4(size_t offset, const long *p);
+ulong4 __ovld vload4(size_t offset, const ulong *p);
+float4 __ovld vload4(size_t offset, const float *p);
+char8 __ovld vload8(size_t offset, const char *p);
+uchar8 __ovld vload8(size_t offset, const uchar *p);
+short8 __ovld vload8(size_t offset, const short *p);
+ushort8 __ovld vload8(size_t offset, const ushort *p);
+int8 __ovld vload8(size_t offset, const int *p);
+uint8 __ovld vload8(size_t offset, const uint *p);
+long8 __ovld vload8(size_t offset, const long *p);
+ulong8 __ovld vload8(size_t offset, const ulong *p);
+float8 __ovld vload8(size_t offset, const float *p);
+char16 __ovld vload16(size_t offset, const char *p);
+uchar16 __ovld vload16(size_t offset, const uchar *p);
+short16 __ovld vload16(size_t offset, const short *p);
+ushort16 __ovld vload16(size_t offset, const ushort *p);
+int16 __ovld vload16(size_t offset, const int *p);
+uint16 __ovld vload16(size_t offset, const uint *p);
+long16 __ovld vload16(size_t offset, const long *p);
+ulong16 __ovld vload16(size_t offset, const ulong *p);
+float16 __ovld vload16(size_t offset, const float *p);
+
+#ifdef cl_khr_fp64
+double2 __ovld vload2(size_t offset, const double *p);
+double3 __ovld vload3(size_t offset, const double *p);
+double4 __ovld vload4(size_t offset, const double *p);
+double8 __ovld vload8(size_t offset, const double *p);
+double16 __ovld vload16(size_t offset, const double *p);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld vload(size_t offset, const half *p);
+half2 __ovld vload2(size_t offset, const half *p);
+half3 __ovld vload3(size_t offset, const half *p);
+half4 __ovld vload4(size_t offset, const half *p);
+half8 __ovld vload8(size_t offset, const half *p);
+half16 __ovld vload16(size_t offset, const half *p);
+#endif //cl_khr_fp16
+#else
+char2 __ovld vload2(size_t offset, const __global char *p);
+uchar2 __ovld vload2(size_t offset, const __global uchar *p);
+short2 __ovld vload2(size_t offset, const __global short *p);
+ushort2 __ovld vload2(size_t offset, const __global ushort *p);
+int2 __ovld vload2(size_t offset, const __global int *p);
+uint2 __ovld vload2(size_t offset, const __global uint *p);
+long2 __ovld vload2(size_t offset, const __global long *p);
+ulong2 __ovld vload2(size_t offset, const __global ulong *p);
+float2 __ovld vload2(size_t offset, const __global float *p);
+char3 __ovld vload3(size_t offset, const __global char *p);
+uchar3 __ovld vload3(size_t offset, const __global uchar *p);
+short3 __ovld vload3(size_t offset, const __global short *p);
+ushort3 __ovld vload3(size_t offset, const __global ushort *p);
+int3 __ovld vload3(size_t offset, const __global int *p);
+uint3 __ovld vload3(size_t offset, const __global uint *p);
+long3 __ovld vload3(size_t offset, const __global long *p);
+ulong3 __ovld vload3(size_t offset, const __global ulong *p);
+float3 __ovld vload3(size_t offset, const __global float *p);
+char4 __ovld vload4(size_t offset, const __global char *p);
+uchar4 __ovld vload4(size_t offset, const __global uchar *p);
+short4 __ovld vload4(size_t offset, const __global short *p);
+ushort4 __ovld vload4(size_t offset, const __global ushort *p);
+int4 __ovld vload4(size_t offset, const __global int *p);
+uint4 __ovld vload4(size_t offset, const __global uint *p);
+long4 __ovld vload4(size_t offset, const __global long *p);
+ulong4 __ovld vload4(size_t offset, const __global ulong *p);
+float4 __ovld vload4(size_t offset, const __global float *p);
+char8 __ovld vload8(size_t offset, const __global char *p);
+uchar8 __ovld vload8(size_t offset, const __global uchar *p);
+short8 __ovld vload8(size_t offset, const __global short *p);
+ushort8 __ovld vload8(size_t offset, const __global ushort *p);
+int8 __ovld vload8(size_t offset, const __global int *p);
+uint8 __ovld vload8(size_t offset, const __global uint *p);
+long8 __ovld vload8(size_t offset, const __global long *p);
+ulong8 __ovld vload8(size_t offset, const __global ulong *p);
+float8 __ovld vload8(size_t offset, const __global float *p);
+char16 __ovld vload16(size_t offset, const __global char *p);
+uchar16 __ovld vload16(size_t offset, const __global uchar *p);
+short16 __ovld vload16(size_t offset, const __global short *p);
+ushort16 __ovld vload16(size_t offset, const __global ushort *p);
+int16 __ovld vload16(size_t offset, const __global int *p);
+uint16 __ovld vload16(size_t offset, const __global uint *p);
+long16 __ovld vload16(size_t offset, const __global long *p);
+ulong16 __ovld vload16(size_t offset, const __global ulong *p);
+float16 __ovld vload16(size_t offset, const __global float *p);
+char2 __ovld vload2(size_t offset, const __local char *p);
+uchar2 __ovld vload2(size_t offset, const __local uchar *p);
+short2 __ovld vload2(size_t offset, const __local short *p);
+ushort2 __ovld vload2(size_t offset, const __local ushort *p);
+int2 __ovld vload2(size_t offset, const __local int *p);
+uint2 __ovld vload2(size_t offset, const __local uint *p);
+long2 __ovld vload2(size_t offset, const __local long *p);
+ulong2 __ovld vload2(size_t offset, const __local ulong *p);
+float2 __ovld vload2(size_t offset, const __local float *p);
+char3 __ovld vload3(size_t offset, const __local char *p);
+uchar3 __ovld vload3(size_t offset, const __local uchar *p);
+short3 __ovld vload3(size_t offset, const __local short *p);
+ushort3 __ovld vload3(size_t offset, const __local ushort *p);
+int3 __ovld vload3(size_t offset, const __local int *p);
+uint3 __ovld vload3(size_t offset, const __local uint *p);
+long3 __ovld vload3(size_t offset, const __local long *p);
+ulong3 __ovld vload3(size_t offset, const __local ulong *p);
+float3 __ovld vload3(size_t offset, const __local float *p);
+char4 __ovld vload4(size_t offset, const __local char *p);
+uchar4 __ovld vload4(size_t offset, const __local uchar *p);
+short4 __ovld vload4(size_t offset, const __local short *p);
+ushort4 __ovld vload4(size_t offset, const __local ushort *p);
+int4 __ovld vload4(size_t offset, const __local int *p);
+uint4 __ovld vload4(size_t offset, const __local uint *p);
+long4 __ovld vload4(size_t offset, const __local long *p);
+ulong4 __ovld vload4(size_t offset, const __local ulong *p);
+float4 __ovld vload4(size_t offset, const __local float *p);
+char8 __ovld vload8(size_t offset, const __local char *p);
+uchar8 __ovld vload8(size_t offset, const __local uchar *p);
+short8 __ovld vload8(size_t offset, const __local short *p);
+ushort8 __ovld vload8(size_t offset, const __local ushort *p);
+int8 __ovld vload8(size_t offset, const __local int *p);
+uint8 __ovld vload8(size_t offset, const __local uint *p);
+long8 __ovld vload8(size_t offset, const __local long *p);
+ulong8 __ovld vload8(size_t offset, const __local ulong *p);
+float8 __ovld vload8(size_t offset, const __local float *p);
+char16 __ovld vload16(size_t offset, const __local char *p);
+uchar16 __ovld vload16(size_t offset, const __local uchar *p);
+short16 __ovld vload16(size_t offset, const __local short *p);
+ushort16 __ovld vload16(size_t offset, const __local ushort *p);
+int16 __ovld vload16(size_t offset, const __local int *p);
+uint16 __ovld vload16(size_t offset, const __local uint *p);
+long16 __ovld vload16(size_t offset, const __local long *p);
+ulong16 __ovld vload16(size_t offset, const __local ulong *p);
+float16 __ovld vload16(size_t offset, const __local float *p);
+char2 __ovld vload2(size_t offset, const __private char *p);
+uchar2 __ovld vload2(size_t offset, const __private uchar *p);
+short2 __ovld vload2(size_t offset, const __private short *p);
+ushort2 __ovld vload2(size_t offset, const __private ushort *p);
+int2 __ovld vload2(size_t offset, const __private int *p);
+uint2 __ovld vload2(size_t offset, const __private uint *p);
+long2 __ovld vload2(size_t offset, const __private long *p);
+ulong2 __ovld vload2(size_t offset, const __private ulong *p);
+float2 __ovld vload2(size_t offset, const __private float *p);
+char3 __ovld vload3(size_t offset, const __private char *p);
+uchar3 __ovld vload3(size_t offset, const __private uchar *p);
+short3 __ovld vload3(size_t offset, const __private short *p);
+ushort3 __ovld vload3(size_t offset, const __private ushort *p);
+int3 __ovld vload3(size_t offset, const __private int *p);
+uint3 __ovld vload3(size_t offset, const __private uint *p);
+long3 __ovld vload3(size_t offset, const __private long *p);
+ulong3 __ovld vload3(size_t offset, const __private ulong *p);
+float3 __ovld vload3(size_t offset, const __private float *p);
+char4 __ovld vload4(size_t offset, const __private char *p);
+uchar4 __ovld vload4(size_t offset, const __private uchar *p);
+short4 __ovld vload4(size_t offset, const __private short *p);
+ushort4 __ovld vload4(size_t offset, const __private ushort *p);
+int4 __ovld vload4(size_t offset, const __private int *p);
+uint4 __ovld vload4(size_t offset, const __private uint *p);
+long4 __ovld vload4(size_t offset, const __private long *p);
+ulong4 __ovld vload4(size_t offset, const __private ulong *p);
+float4 __ovld vload4(size_t offset, const __private float *p);
+char8 __ovld vload8(size_t offset, const __private char *p);
+uchar8 __ovld vload8(size_t offset, const __private uchar *p);
+short8 __ovld vload8(size_t offset, const __private short *p);
+ushort8 __ovld vload8(size_t offset, const __private ushort *p);
+int8 __ovld vload8(size_t offset, const __private int *p);
+uint8 __ovld vload8(size_t offset, const __private uint *p);
+long8 __ovld vload8(size_t offset, const __private long *p);
+ulong8 __ovld vload8(size_t offset, const __private ulong *p);
+float8 __ovld vload8(size_t offset, const __private float *p);
+char16 __ovld vload16(size_t offset, const __private char *p);
+uchar16 __ovld vload16(size_t offset, const __private uchar *p);
+short16 __ovld vload16(size_t offset, const __private short *p);
+ushort16 __ovld vload16(size_t offset, const __private ushort *p);
+int16 __ovld vload16(size_t offset, const __private int *p);
+uint16 __ovld vload16(size_t offset, const __private uint *p);
+long16 __ovld vload16(size_t offset, const __private long *p);
+ulong16 __ovld vload16(size_t offset, const __private ulong *p);
+float16 __ovld vload16(size_t offset, const __private float *p);
+
+#ifdef cl_khr_fp64
+double2 __ovld vload2(size_t offset, const __global double *p);
+double3 __ovld vload3(size_t offset, const __global double *p);
+double4 __ovld vload4(size_t offset, const __global double *p);
+double8 __ovld vload8(size_t offset, const __global double *p);
+double16 __ovld vload16(size_t offset, const __global double *p);
+double2 __ovld vload2(size_t offset, const __local double *p);
+double3 __ovld vload3(size_t offset, const __local double *p);
+double4 __ovld vload4(size_t offset, const __local double *p);
+double8 __ovld vload8(size_t offset, const __local double *p);
+double16 __ovld vload16(size_t offset, const __local double *p);
+double2 __ovld vload2(size_t offset, const __private double *p);
+double3 __ovld vload3(size_t offset, const __private double *p);
+double4 __ovld vload4(size_t offset, const __private double *p);
+double8 __ovld vload8(size_t offset, const __private double *p);
+double16 __ovld vload16(size_t offset, const __private double *p);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld vload(size_t offset, const __global half *p);
+half2 __ovld vload2(size_t offset, const __global half *p);
+half3 __ovld vload3(size_t offset, const __global half *p);
+half4 __ovld vload4(size_t offset, const __global half *p);
+half8 __ovld vload8(size_t offset, const __global half *p);
+half16 __ovld vload16(size_t offset, const __global half *p);
+half __ovld vload(size_t offset, const __local half *p);
+half2 __ovld vload2(size_t offset, const __local half *p);
+half3 __ovld vload3(size_t offset, const __local half *p);
+half4 __ovld vload4(size_t offset, const __local half *p);
+half8 __ovld vload8(size_t offset, const __local half *p);
+half16 __ovld vload16(size_t offset, const __local half *p);
+half __ovld vload(size_t offset, const __private half *p);
+half2 __ovld vload2(size_t offset, const __private half *p);
+half3 __ovld vload3(size_t offset, const __private half *p);
+half4 __ovld vload4(size_t offset, const __private half *p);
+half8 __ovld vload8(size_t offset, const __private half *p);
+half16 __ovld vload16(size_t offset, const __private half *p);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld vstore2(char2 data, size_t offset, char *p);
+void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
+void __ovld vstore2(short2 data, size_t offset, short *p);
+void __ovld vstore2(ushort2 data, size_t offset, ushort *p);
+void __ovld vstore2(int2 data, size_t offset, int *p);
+void __ovld vstore2(uint2 data, size_t offset, uint *p);
+void __ovld vstore2(long2 data, size_t offset, long *p);
+void __ovld vstore2(ulong2 data, size_t offset, ulong *p);
+void __ovld vstore2(float2 data, size_t offset, float *p);
+void __ovld vstore3(char3 data, size_t offset, char *p);
+void __ovld vstore3(uchar3 data, size_t offset, uchar *p);
+void __ovld vstore3(short3 data, size_t offset, short *p);
+void __ovld vstore3(ushort3 data, size_t offset, ushort *p);
+void __ovld vstore3(int3 data, size_t offset, int *p);
+void __ovld vstore3(uint3 data, size_t offset, uint *p);
+void __ovld vstore3(long3 data, size_t offset, long *p);
+void __ovld vstore3(ulong3 data, size_t offset, ulong *p);
+void __ovld vstore3(float3 data, size_t offset, float *p);
+void __ovld vstore4(char4 data, size_t offset, char *p);
+void __ovld vstore4(uchar4 data, size_t offset, uchar *p);
+void __ovld vstore4(short4 data, size_t offset, short *p);
+void __ovld vstore4(ushort4 data, size_t offset, ushort *p);
+void __ovld vstore4(int4 data, size_t offset, int *p);
+void __ovld vstore4(uint4 data, size_t offset, uint *p);
+void __ovld vstore4(long4 data, size_t offset, long *p);
+void __ovld vstore4(ulong4 data, size_t offset, ulong *p);
+void __ovld vstore4(float4 data, size_t offset, float *p);
+void __ovld vstore8(char8 data, size_t offset, char *p);
+void __ovld vstore8(uchar8 data, size_t offset, uchar *p);
+void __ovld vstore8(short8 data, size_t offset, short *p);
+void __ovld vstore8(ushort8 data, size_t offset, ushort *p);
+void __ovld vstore8(int8 data, size_t offset, int *p);
+void __ovld vstore8(uint8 data, size_t offset, uint *p);
+void __ovld vstore8(long8 data, size_t offset, long *p);
+void __ovld vstore8(ulong8 data, size_t offset, ulong *p);
+void __ovld vstore8(float8 data, size_t offset, float *p);
+void __ovld vstore16(char16 data, size_t offset, char *p);
+void __ovld vstore16(uchar16 data, size_t offset, uchar *p);
+void __ovld vstore16(short16 data, size_t offset, short *p);
+void __ovld vstore16(ushort16 data, size_t offset, ushort *p);
+void __ovld vstore16(int16 data, size_t offset, int *p);
+void __ovld vstore16(uint16 data, size_t offset, uint *p);
+void __ovld vstore16(long16 data, size_t offset, long *p);
+void __ovld vstore16(ulong16 data, size_t offset, ulong *p);
+void __ovld vstore16(float16 data, size_t offset, float *p);
+#ifdef cl_khr_fp64
+void __ovld vstore2(double2 data, size_t offset, double *p);
+void __ovld vstore3(double3 data, size_t offset, double *p);
+void __ovld vstore4(double4 data, size_t offset, double *p);
+void __ovld vstore8(double8 data, size_t offset, double *p);
+void __ovld vstore16(double16 data, size_t offset, double *p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld vstore(half data, size_t offset, half *p);
+void __ovld vstore2(half2 data, size_t offset, half *p);
+void __ovld vstore3(half3 data, size_t offset, half *p);
+void __ovld vstore4(half4 data, size_t offset, half *p);
+void __ovld vstore8(half8 data, size_t offset, half *p);
+void __ovld vstore16(half16 data, size_t offset, half *p);
+#endif //cl_khr_fp16
+#else
+void __ovld vstore2(char2 data, size_t offset, __global char *p);
+void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
+void __ovld vstore2(short2 data, size_t offset, __global short *p);
+void __ovld vstore2(ushort2 data, size_t offset, __global ushort *p);
+void __ovld vstore2(int2 data, size_t offset, __global int *p);
+void __ovld vstore2(uint2 data, size_t offset, __global uint *p);
+void __ovld vstore2(long2 data, size_t offset, __global long *p);
+void __ovld vstore2(ulong2 data, size_t offset, __global ulong *p);
+void __ovld vstore2(float2 data, size_t offset, __global float *p);
+void __ovld vstore3(char3 data, size_t offset, __global char *p);
+void __ovld vstore3(uchar3 data, size_t offset, __global uchar *p);
+void __ovld vstore3(short3 data, size_t offset, __global short *p);
+void __ovld vstore3(ushort3 data, size_t offset, __global ushort *p);
+void __ovld vstore3(int3 data, size_t offset, __global int *p);
+void __ovld vstore3(uint3 data, size_t offset, __global uint *p);
+void __ovld vstore3(long3 data, size_t offset, __global long *p);
+void __ovld vstore3(ulong3 data, size_t offset, __global ulong *p);
+void __ovld vstore3(float3 data, size_t offset, __global float *p);
+void __ovld vstore4(char4 data, size_t offset, __global char *p);
+void __ovld vstore4(uchar4 data, size_t offset, __global uchar *p);
+void __ovld vstore4(short4 data, size_t offset, __global short *p);
+void __ovld vstore4(ushort4 data, size_t offset, __global ushort *p);
+void __ovld vstore4(int4 data, size_t offset, __global int *p);
+void __ovld vstore4(uint4 data, size_t offset, __global uint *p);
+void __ovld vstore4(long4 data, size_t offset, __global long *p);
+void __ovld vstore4(ulong4 data, size_t offset, __global ulong *p);
+void __ovld vstore4(float4 data, size_t offset, __global float *p);
+void __ovld vstore8(char8 data, size_t offset, __global char *p);
+void __ovld vstore8(uchar8 data, size_t offset, __global uchar *p);
+void __ovld vstore8(short8 data, size_t offset, __global short *p);
+void __ovld vstore8(ushort8 data, size_t offset, __global ushort *p);
+void __ovld vstore8(int8 data, size_t offset, __global int *p);
+void __ovld vstore8(uint8 data, size_t offset, __global uint *p);
+void __ovld vstore8(long8 data, size_t offset, __global long *p);
+void __ovld vstore8(ulong8 data, size_t offset, __global ulong *p);
+void __ovld vstore8(float8 data, size_t offset, __global float *p);
+void __ovld vstore16(char16 data, size_t offset, __global char *p);
+void __ovld vstore16(uchar16 data, size_t offset, __global uchar *p);
+void __ovld vstore16(short16 data, size_t offset, __global short *p);
+void __ovld vstore16(ushort16 data, size_t offset, __global ushort *p);
+void __ovld vstore16(int16 data, size_t offset, __global int *p);
+void __ovld vstore16(uint16 data, size_t offset, __global uint *p);
+void __ovld vstore16(long16 data, size_t offset, __global long *p);
+void __ovld vstore16(ulong16 data, size_t offset, __global ulong *p);
+void __ovld vstore16(float16 data, size_t offset, __global float *p);
+void __ovld vstore2(char2 data, size_t offset, __local char *p);
+void __ovld vstore2(uchar2 data, size_t offset, __local uchar *p);
+void __ovld vstore2(short2 data, size_t offset, __local short *p);
+void __ovld vstore2(ushort2 data, size_t offset, __local ushort *p);
+void __ovld vstore2(int2 data, size_t offset, __local int *p);
+void __ovld vstore2(uint2 data, size_t offset, __local uint *p);
+void __ovld vstore2(long2 data, size_t offset, __local long *p);
+void __ovld vstore2(ulong2 data, size_t offset, __local ulong *p);
+void __ovld vstore2(float2 data, size_t offset, __local float *p);
+void __ovld vstore3(char3 data, size_t offset, __local char *p);
+void __ovld vstore3(uchar3 data, size_t offset, __local uchar *p);
+void __ovld vstore3(short3 data, size_t offset, __local short *p);
+void __ovld vstore3(ushort3 data, size_t offset, __local ushort *p);
+void __ovld vstore3(int3 data, size_t offset, __local int *p);
+void __ovld vstore3(uint3 data, size_t offset, __local uint *p);
+void __ovld vstore3(long3 data, size_t offset, __local long *p);
+void __ovld vstore3(ulong3 data, size_t offset, __local ulong *p);
+void __ovld vstore3(float3 data, size_t offset, __local float *p);
+void __ovld vstore4(char4 data, size_t offset, __local char *p);
+void __ovld vstore4(uchar4 data, size_t offset, __local uchar *p);
+void __ovld vstore4(short4 data, size_t offset, __local short *p);
+void __ovld vstore4(ushort4 data, size_t offset, __local ushort *p);
+void __ovld vstore4(int4 data, size_t offset, __local int *p);
+void __ovld vstore4(uint4 data, size_t offset, __local uint *p);
+void __ovld vstore4(long4 data, size_t offset, __local long *p);
+void __ovld vstore4(ulong4 data, size_t offset, __local ulong *p);
+void __ovld vstore4(float4 data, size_t offset, __local float *p);
+void __ovld vstore8(char8 data, size_t offset, __local char *p);
+void __ovld vstore8(uchar8 data, size_t offset, __local uchar *p);
+void __ovld vstore8(short8 data, size_t offset, __local short *p);
+void __ovld vstore8(ushort8 data, size_t offset, __local ushort *p);
+void __ovld vstore8(int8 data, size_t offset, __local int *p);
+void __ovld vstore8(uint8 data, size_t offset, __local uint *p);
+void __ovld vstore8(long8 data, size_t offset, __local long *p);
+void __ovld vstore8(ulong8 data, size_t offset, __local ulong *p);
+void __ovld vstore8(float8 data, size_t offset, __local float *p);
+void __ovld vstore16(char16 data, size_t offset, __local char *p);
+void __ovld vstore16(uchar16 data, size_t offset, __local uchar *p);
+void __ovld vstore16(short16 data, size_t offset, __local short *p);
+void __ovld vstore16(ushort16 data, size_t offset, __local ushort *p);
+void __ovld vstore16(int16 data, size_t offset, __local int *p);
+void __ovld vstore16(uint16 data, size_t offset, __local uint *p);
+void __ovld vstore16(long16 data, size_t offset, __local long *p);
+void __ovld vstore16(ulong16 data, size_t offset, __local ulong *p);
+void __ovld vstore16(float16 data, size_t offset, __local float *p);
+void __ovld vstore2(char2 data, size_t offset, __private char *p);
+void __ovld vstore2(uchar2 data, size_t offset, __private uchar *p);
+void __ovld vstore2(short2 data, size_t offset, __private short *p);
+void __ovld vstore2(ushort2 data, size_t offset, __private ushort *p);
+void __ovld vstore2(int2 data, size_t offset, __private int *p);
+void __ovld vstore2(uint2 data, size_t offset, __private uint *p);
+void __ovld vstore2(long2 data, size_t offset, __private long *p);
+void __ovld vstore2(ulong2 data, size_t offset, __private ulong *p);
+void __ovld vstore2(float2 data, size_t offset, __private float *p);
+void __ovld vstore3(char3 data, size_t offset, __private char *p);
+void __ovld vstore3(uchar3 data, size_t offset, __private uchar *p);
+void __ovld vstore3(short3 data, size_t offset, __private short *p);
+void __ovld vstore3(ushort3 data, size_t offset, __private ushort *p);
+void __ovld vstore3(int3 data, size_t offset, __private int *p);
+void __ovld vstore3(uint3 data, size_t offset, __private uint *p);
+void __ovld vstore3(long3 data, size_t offset, __private long *p);
+void __ovld vstore3(ulong3 data, size_t offset, __private ulong *p);
+void __ovld vstore3(float3 data, size_t offset, __private float *p);
+void __ovld vstore4(char4 data, size_t offset, __private char *p);
+void __ovld vstore4(uchar4 data, size_t offset, __private uchar *p);
+void __ovld vstore4(short4 data, size_t offset, __private short *p);
+void __ovld vstore4(ushort4 data, size_t offset, __private ushort *p);
+void __ovld vstore4(int4 data, size_t offset, __private int *p);
+void __ovld vstore4(uint4 data, size_t offset, __private uint *p);
+void __ovld vstore4(long4 data, size_t offset, __private long *p);
+void __ovld vstore4(ulong4 data, size_t offset, __private ulong *p);
+void __ovld vstore4(float4 data, size_t offset, __private float *p);
+void __ovld vstore8(char8 data, size_t offset, __private char *p);
+void __ovld vstore8(uchar8 data, size_t offset, __private uchar *p);
+void __ovld vstore8(short8 data, size_t offset, __private short *p);
+void __ovld vstore8(ushort8 data, size_t offset, __private ushort *p);
+void __ovld vstore8(int8 data, size_t offset, __private int *p);
+void __ovld vstore8(uint8 data, size_t offset, __private uint *p);
+void __ovld vstore8(long8 data, size_t offset, __private long *p);
+void __ovld vstore8(ulong8 data, size_t offset, __private ulong *p);
+void __ovld vstore8(float8 data, size_t offset, __private float *p);
+void __ovld vstore16(char16 data, size_t offset, __private char *p);
+void __ovld vstore16(uchar16 data, size_t offset, __private uchar *p);
+void __ovld vstore16(short16 data, size_t offset, __private short *p);
+void __ovld vstore16(ushort16 data, size_t offset, __private ushort *p);
+void __ovld vstore16(int16 data, size_t offset, __private int *p);
+void __ovld vstore16(uint16 data, size_t offset, __private uint *p);
+void __ovld vstore16(long16 data, size_t offset, __private long *p);
+void __ovld vstore16(ulong16 data, size_t offset, __private ulong *p);
+void __ovld vstore16(float16 data, size_t offset, __private float *p);
+#ifdef cl_khr_fp64
+void __ovld vstore2(double2 data, size_t offset, __global double *p);
+void __ovld vstore3(double3 data, size_t offset, __global double *p);
+void __ovld vstore4(double4 data, size_t offset, __global double *p);
+void __ovld vstore8(double8 data, size_t offset, __global double *p);
+void __ovld vstore16(double16 data, size_t offset, __global double *p);
+void __ovld vstore2(double2 data, size_t offset, __local double *p);
+void __ovld vstore3(double3 data, size_t offset, __local double *p);
+void __ovld vstore4(double4 data, size_t offset, __local double *p);
+void __ovld vstore8(double8 data, size_t offset, __local double *p);
+void __ovld vstore16(double16 data, size_t offset, __local double *p);
+void __ovld vstore2(double2 data, size_t offset, __private double *p);
+void __ovld vstore3(double3 data, size_t offset, __private double *p);
+void __ovld vstore4(double4 data, size_t offset, __private double *p);
+void __ovld vstore8(double8 data, size_t offset, __private double *p);
+void __ovld vstore16(double16 data, size_t offset, __private double *p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld vstore(half data, size_t offset, __global half *p);
+void __ovld vstore2(half2 data, size_t offset, __global half *p);
+void __ovld vstore3(half3 data, size_t offset, __global half *p);
+void __ovld vstore4(half4 data, size_t offset, __global half *p);
+void __ovld vstore8(half8 data, size_t offset, __global half *p);
+void __ovld vstore16(half16 data, size_t offset, __global half *p);
+void __ovld vstore(half data, size_t offset, __local half *p);
+void __ovld vstore2(half2 data, size_t offset, __local half *p);
+void __ovld vstore3(half3 data, size_t offset, __local half *p);
+void __ovld vstore4(half4 data, size_t offset, __local half *p);
+void __ovld vstore8(half8 data, size_t offset, __local half *p);
+void __ovld vstore16(half16 data, size_t offset, __local half *p);
+void __ovld vstore(half data, size_t offset, __private half *p);
+void __ovld vstore2(half2 data, size_t offset, __private half *p);
+void __ovld vstore3(half3 data, size_t offset, __private half *p);
+void __ovld vstore4(half4 data, size_t offset, __private half *p);
+void __ovld vstore8(half8 data, size_t offset, __private half *p);
+void __ovld vstore16(half16 data, size_t offset, __private half *p);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Read sizeof (half) bytes of data from address
+ * (p + offset). The data read is interpreted as a
+ * half value. The half value is converted to a
+ * float value and the float value is returned.
+ * The read address computed as (p + offset)
+ * must be 16-bit aligned.
+ */
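+/*
+ * Illustrative usage (editorial sketch, not part of the header's specification
+ * text): a hypothetical kernel that widens packed half data to float, one
+ * element per work-item.
+ *
+ *   __kernel void widen(__global const half *in, __global float *out) {
+ *     size_t i = get_global_id(0);
+ *     out[i] = vload_half(i, in);   // reads in[i] and returns it as a float
+ *   }
+ */
+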
+float __ovld vload_half(size_t offset, const __constant half *p);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld vload_half(size_t offset, const half *p);
+#else
+float __ovld vload_half(size_t offset, const __global half *p);
+float __ovld vload_half(size_t offset, const __local half *p);
+float __ovld vload_half(size_t offset, const __private half *p);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Read sizeof (halfn) bytes of data from address
+ * (p + (offset * n)). The data read is interpreted
+ * as a halfn value. The halfn value read is
+ * converted to a floatn value and the floatn
+ * value is returned. The read address computed
+ * as (p + (offset * n)) must be 16-bit aligned.
+ */
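+/*
+ * Illustrative usage (editorial sketch, not part of the header's specification
+ * text): the vector forms widen several packed halves at once; continuing the
+ * hypothetical kernel above,
+ *
+ *   float4 v = vload_half4(i, in);   // reads in[4*i] .. in[4*i + 3] as a float4
+ */
+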
+float2 __ovld vload_half2(size_t offset, const __constant half *p);
+float3 __ovld vload_half3(size_t offset, const __constant half *p);
+float4 __ovld vload_half4(size_t offset, const __constant half *p);
+float8 __ovld vload_half8(size_t offset, const __constant half *p);
+float16 __ovld vload_half16(size_t offset, const __constant half *p);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float2 __ovld vload_half2(size_t offset, const half *p);
+float3 __ovld vload_half3(size_t offset, const half *p);
+float4 __ovld vload_half4(size_t offset, const half *p);
+float8 __ovld vload_half8(size_t offset, const half *p);
+float16 __ovld vload_half16(size_t offset, const half *p);
+#else
+float2 __ovld vload_half2(size_t offset, const __global half *p);
+float3 __ovld vload_half3(size_t offset, const __global half *p);
+float4 __ovld vload_half4(size_t offset, const __global half *p);
+float8 __ovld vload_half8(size_t offset, const __global half *p);
+float16 __ovld vload_half16(size_t offset, const __global half *p);
+float2 __ovld vload_half2(size_t offset, const __local half *p);
+float3 __ovld vload_half3(size_t offset, const __local half *p);
+float4 __ovld vload_half4(size_t offset, const __local half *p);
+float8 __ovld vload_half8(size_t offset, const __local half *p);
+float16 __ovld vload_half16(size_t offset, const __local half *p);
+float2 __ovld vload_half2(size_t offset, const __private half *p);
+float3 __ovld vload_half3(size_t offset, const __private half *p);
+float4 __ovld vload_half4(size_t offset, const __private half *p);
+float8 __ovld vload_half8(size_t offset, const __private half *p);
+float16 __ovld vload_half16(size_t offset, const __private half *p);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * The float value given by data is first
+ * converted to a half value using the appropriate
+ * rounding mode. The half value is then written
+ * to address computed as (p + offset). The
+ * address computed as (p + offset) must be 16-
+ * bit aligned.
+ * vstore_half uses the current rounding mode.
+ * The default current rounding mode is round to
+ * nearest even.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld vstore_half(float data, size_t offset, half *p);
+void __ovld vstore_half_rte(float data, size_t offset, half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half(double data, size_t offset, half *p);
+void __ovld vstore_half_rte(double data, size_t offset, half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, half *p);
+#endif //cl_khr_fp64
+#else
+void __ovld vstore_half(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, __global half *p);
+void __ovld vstore_half(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rte(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, __local half *p);
+void __ovld vstore_half(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rte(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, __private half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rte(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, __global half *p);
+void __ovld vstore_half(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rte(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, __local half *p);
+void __ovld vstore_half(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rte(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
+#endif //cl_khr_fp64
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
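+
+// Illustrative sketch of the explicit-rounding variants (hypothetical names):
+// vstore_half_rtz narrows with round-toward-zero instead of the current mode:
+//
+//   __kernel void narrow_rtz(__global const float *in, __global half *out) {
+//     size_t i = get_global_id(0);
+//     vstore_half_rtz(in[i], i, out);  // writes sizeof(half) bytes at out + i
+//   }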
+
+/**
+ * The floatn value given by data is converted to
+ * a halfn value using the appropriate rounding
+ * mode. The halfn value is then written to
+ * address computed as (p + (offset * n)). The
+ * address computed as (p + (offset * n)) must be
+ * 16-bit aligned.
+ * vstore_halfn uses the current rounding mode.
+ * The default current rounding mode is round to
+ * nearest even.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld vstore_half2(float2 data, size_t offset, half *p);
+void __ovld vstore_half3(float3 data, size_t offset, half *p);
+void __ovld vstore_half4(float4 data, size_t offset, half *p);
+void __ovld vstore_half8(float8 data, size_t offset, half *p);
+void __ovld vstore_half16(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rte(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rte(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rte(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rte(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rte(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half2(double2 data, size_t offset, half *p);
+void __ovld vstore_half3(double3 data, size_t offset, half *p);
+void __ovld vstore_half4(double4 data, size_t offset, half *p);
+void __ovld vstore_half8(double8 data, size_t offset, half *p);
+void __ovld vstore_half16(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rte(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rte(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rte(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rte(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rte(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtz(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtz(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtz(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtz(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtz(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtp(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtp(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtp(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtp(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtp(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtn(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtn(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtn(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
+#endif //cl_khr_fp64
+#else
+void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
+void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
+void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
+void __ovld vstore_half8(float8 data, size_t offset, __global half *p);
+void __ovld vstore_half16(float16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rte(float2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rte(float3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rte(float4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rte(float8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rte(float16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtz(float2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtz(float3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtz(float4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtz(float8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtz(float16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtp(float2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtp(float3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtp(float4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtp(float8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtp(float16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtn(float2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtn(float3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtn(float4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtn(float8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtn(float16 data, size_t offset, __global half *p);
+void __ovld vstore_half2(float2 data, size_t offset, __local half *p);
+void __ovld vstore_half3(float3 data, size_t offset, __local half *p);
+void __ovld vstore_half4(float4 data, size_t offset, __local half *p);
+void __ovld vstore_half8(float8 data, size_t offset, __local half *p);
+void __ovld vstore_half16(float16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rte(float2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rte(float3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rte(float4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rte(float8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rte(float16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtz(float2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtz(float3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtz(float4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtz(float8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtz(float16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtp(float2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtp(float3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtp(float4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtp(float8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtp(float16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtn(float2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtn(float3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtn(float4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtn(float8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtn(float16 data, size_t offset, __local half *p);
+void __ovld vstore_half2(float2 data, size_t offset, __private half *p);
+void __ovld vstore_half3(float3 data, size_t offset, __private half *p);
+void __ovld vstore_half4(float4 data, size_t offset, __private half *p);
+void __ovld vstore_half8(float8 data, size_t offset, __private half *p);
+void __ovld vstore_half16(float16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rte(float2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rte(float3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rte(float4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rte(float8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rte(float16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtz(float2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtz(float3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtz(float4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtz(float8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtz(float16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtp(float2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtp(float3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtp(float4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtp(float8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtp(float16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtn(float2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtn(float3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtn(float4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtn(float8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtn(float16 data, size_t offset, __private half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half2(double2 data, size_t offset, __global half *p);
+void __ovld vstore_half3(double3 data, size_t offset, __global half *p);
+void __ovld vstore_half4(double4 data, size_t offset, __global half *p);
+void __ovld vstore_half8(double8 data, size_t offset, __global half *p);
+void __ovld vstore_half16(double16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rte(double2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rte(double3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rte(double4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rte(double8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rte(double16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtz(double2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtz(double3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtz(double4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtz(double8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtz(double16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtp(double2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtp(double3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtp(double4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtp(double8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtp(double16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtn(double2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtn(double3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtn(double4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtn(double8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtn(double16 data, size_t offset, __global half *p);
+void __ovld vstore_half2(double2 data, size_t offset, __local half *p);
+void __ovld vstore_half3(double3 data, size_t offset, __local half *p);
+void __ovld vstore_half4(double4 data, size_t offset, __local half *p);
+void __ovld vstore_half8(double8 data, size_t offset, __local half *p);
+void __ovld vstore_half16(double16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rte(double2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rte(double3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rte(double4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rte(double8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rte(double16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtz(double2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtz(double3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtz(double4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtz(double8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtz(double16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtp(double2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtp(double3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtp(double4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtp(double8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtp(double16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtn(double2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtn(double3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtn(double4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtn(double8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtn(double16 data, size_t offset, __local half *p);
+void __ovld vstore_half2(double2 data, size_t offset, __private half *p);
+void __ovld vstore_half3(double3 data, size_t offset, __private half *p);
+void __ovld vstore_half4(double4 data, size_t offset, __private half *p);
+void __ovld vstore_half8(double8 data, size_t offset, __private half *p);
+void __ovld vstore_half16(double16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rte(double2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rte(double3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rte(double4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rte(double8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rte(double16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtz(double2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtz(double3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtz(double4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtz(double8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtz(double16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtp(double2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtp(double3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtp(double4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtp(double8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtp(double16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtn(double2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtn(double3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
+#endif //cl_khr_fp64
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
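+
+// Illustrative sketch (hypothetical names): packing a float4 per work-item
+// into a contiguous half buffer with the default (round-to-nearest-even) mode:
+//
+//   __kernel void pack_half4(__global const float4 *v, __global half *out) {
+//     size_t i = get_global_id(0);
+//     vstore_half4(v[i], i, out);      // writes 4 halfs starting at out + i*4
+//   }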
+
+/**
+ * For n = 1, 2, 4, 8 and 16 read sizeof (halfn)
+ * bytes of data from address (p + (offset * n)).
+ * The data read is interpreted as a halfn value.
+ * The halfn value read is converted to a floatn
+ * value and the floatn value is returned.
+ * The address computed as (p + (offset * n))
+ * must be aligned to sizeof (halfn) bytes.
+ * For n = 3, vloada_half3 reads a half3 from
+ * address (p + (offset * 4)) and returns a float3.
+ * The address computed as (p + (offset * 4))
+ * must be aligned to sizeof (half) * 4 bytes.
+ */
+float __ovld vloada_half(size_t offset, const __constant half *p);
+float2 __ovld vloada_half2(size_t offset, const __constant half *p);
+float3 __ovld vloada_half3(size_t offset, const __constant half *p);
+float4 __ovld vloada_half4(size_t offset, const __constant half *p);
+float8 __ovld vloada_half8(size_t offset, const __constant half *p);
+float16 __ovld vloada_half16(size_t offset, const __constant half *p);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld vloada_half(size_t offset, const half *p);
+float2 __ovld vloada_half2(size_t offset, const half *p);
+float3 __ovld vloada_half3(size_t offset, const half *p);
+float4 __ovld vloada_half4(size_t offset, const half *p);
+float8 __ovld vloada_half8(size_t offset, const half *p);
+float16 __ovld vloada_half16(size_t offset, const half *p);
+#else
+float __ovld vloada_half(size_t offset, const __global half *p);
+float2 __ovld vloada_half2(size_t offset, const __global half *p);
+float3 __ovld vloada_half3(size_t offset, const __global half *p);
+float4 __ovld vloada_half4(size_t offset, const __global half *p);
+float8 __ovld vloada_half8(size_t offset, const __global half *p);
+float16 __ovld vloada_half16(size_t offset, const __global half *p);
+float __ovld vloada_half(size_t offset, const __local half *p);
+float2 __ovld vloada_half2(size_t offset, const __local half *p);
+float3 __ovld vloada_half3(size_t offset, const __local half *p);
+float4 __ovld vloada_half4(size_t offset, const __local half *p);
+float8 __ovld vloada_half8(size_t offset, const __local half *p);
+float16 __ovld vloada_half16(size_t offset, const __local half *p);
+float __ovld vloada_half(size_t offset, const __private half *p);
+float2 __ovld vloada_half2(size_t offset, const __private half *p);
+float3 __ovld vloada_half3(size_t offset, const __private half *p);
+float4 __ovld vloada_half4(size_t offset, const __private half *p);
+float8 __ovld vloada_half8(size_t offset, const __private half *p);
+float16 __ovld vloada_half16(size_t offset, const __private half *p);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
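+
+// Illustrative sketch (hypothetical names) of the aligned 3-element case:
+// unlike vload_half3, vloada_half3 addresses its source with a stride of
+// 4 half elements, so (p + offset*4) must be sizeof(half)*4 aligned:
+//
+//   __kernel void load_half3a(__global const half *src, __global float3 *dst) {
+//     size_t i = get_global_id(0);
+//     dst[i] = vloada_half3(i, src);   // reads 3 halfs starting at src + i*4
+//   }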
+
+/**
+ * The floatn value given by data is converted to
+ * a halfn value using the appropriate rounding
+ * mode.
+ * For n = 1, 2, 4, 8 and 16, the halfn value is
+ * written to the address computed as (p + (offset
+ * * n)). The address computed as (p + (offset *
+ * n)) must be aligned to sizeof (halfn) bytes.
+ * For n = 3, the half3 value is written to the
+ * address computed as (p + (offset * 4)). The
+ * address computed as (p + (offset * 4)) must be
+ * aligned to sizeof (half) * 4 bytes.
+ * vstorea_halfn uses the current rounding
+ * mode. The default current rounding mode is
+ * round to nearest even.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld vstorea_half(float data, size_t offset, half *p);
+void __ovld vstorea_half2(float2 data, size_t offset, half *p);
+void __ovld vstorea_half3(float3 data, size_t offset, half *p);
+void __ovld vstorea_half4(float4 data, size_t offset, half *p);
+void __ovld vstorea_half8(float8 data, size_t offset, half *p);
+void __ovld vstorea_half16(float16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rte(float data, size_t offset, half *p);
+void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtz(float data, size_t offset, half *p);
+void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtp(float data, size_t offset, half *p);
+void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtn(float data, size_t offset, half *p);
+void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p);
+
+#ifdef cl_khr_fp64
+void __ovld vstorea_half(double data, size_t offset, half *p);
+void __ovld vstorea_half2(double2 data, size_t offset, half *p);
+void __ovld vstorea_half3(double3 data, size_t offset, half *p);
+void __ovld vstorea_half4(double4 data, size_t offset, half *p);
+void __ovld vstorea_half8(double8 data, size_t offset, half *p);
+void __ovld vstorea_half16(double16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rte(double data, size_t offset, half *p);
+void __ovld vstorea_half2_rte(double2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rte(double3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rte(double4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rte(double8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rte(double16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtz(double data, size_t offset, half *p);
+void __ovld vstorea_half2_rtz(double2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtz(double3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtz(double4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtz(double8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtz(double16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtp(double data, size_t offset, half *p);
+void __ovld vstorea_half2_rtp(double2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtp(double3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtp(double4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtp(double8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtp(double16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtn(double data, size_t offset, half *p);
+void __ovld vstorea_half2_rtn(double2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtn(double3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
+#endif //cl_khr_fp64
+
+#else
+void __ovld vstorea_half(float data, size_t offset, __global half *p);
+void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8(float8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16(float16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rte(float data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rte(float2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rte(float3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rte(float4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rte(float8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rte(float16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtz(float data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtz(float2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtz(float3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtz(float4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtz(float8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtz(float16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtp(float data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtp(float2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtp(float3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtp(float4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtp(float8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtp(float16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtn(float data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtn(float2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtn(float3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtn(float4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtn(float8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtn(float16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half(float data, size_t offset, __local half *p);
+void __ovld vstorea_half2(float2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3(float3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4(float4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8(float8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16(float16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rte(float data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rte(float2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rte(float3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rte(float4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rte(float8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rte(float16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtz(float data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtz(float2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtz(float3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtz(float4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtz(float8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtz(float16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtp(float data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtp(float2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtp(float3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtp(float4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtp(float8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtp(float16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtn(float data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtn(float2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtn(float3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtn(float4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtn(float8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtn(float16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half(float data, size_t offset, __private half *p);
+void __ovld vstorea_half2(float2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3(float3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4(float4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8(float8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16(float16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rte(float data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rte(float2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rte(float3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rte(float4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rte(float8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rte(float16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtz(float data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtz(float2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtz(float3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtz(float4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtz(float8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtz(float16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtp(float data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtp(float2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtp(float3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtp(float4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtp(float8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtp(float16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtn(float data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtn(float2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtn(float3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtn(float4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtn(float8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtn(float16 data, size_t offset, __private half *p);
+
+#ifdef cl_khr_fp64
+void __ovld vstorea_half(double data, size_t offset, __global half *p);
+void __ovld vstorea_half2(double2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3(double3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4(double4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8(double8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16(double16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rte(double data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rte(double2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rte(double3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rte(double4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rte(double8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rte(double16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtz(double data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtz(double2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtz(double3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtz(double4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtz(double8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtz(double16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtp(double data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtp(double2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtp(double3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtp(double4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtp(double8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtp(double16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtn(double data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtn(double2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtn(double3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtn(double4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtn(double8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtn(double16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half(double data, size_t offset, __local half *p);
+void __ovld vstorea_half2(double2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3(double3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4(double4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8(double8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16(double16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rte(double data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rte(double2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rte(double3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rte(double4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rte(double8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rte(double16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtz(double data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtz(double2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtz(double3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtz(double4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtz(double8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtz(double16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtp(double data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtp(double2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtp(double3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtp(double4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtp(double8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtp(double16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtn(double data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtn(double2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtn(double3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtn(double4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtn(double8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtn(double16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rte(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rte(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rte(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rte(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rte(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rte(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtz(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtz(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtz(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtz(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtz(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtz(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtp(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtp(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtp(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtp(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtp(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtp(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtn(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtn(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtn(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtn(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtn(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtn(double16 data, size_t offset, __private half *p);
+#endif //cl_khr_fp64
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
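+
+// Illustrative sketch (hypothetical names) mirroring the aligned load above:
+// a half3 stored with vstorea_half3 is addressed with a stride of 4 half
+// elements in the destination:
+//
+//   __kernel void store_half3a(__global const float3 *v, __global half *out) {
+//     size_t i = get_global_id(0);
+//     vstorea_half3(v[i], i, out);     // writes 3 halfs starting at out + i*4
+//   }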
+
+// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
+
+// Flag type and values for barrier, mem_fence, read_mem_fence, write_mem_fence
+typedef uint cl_mem_fence_flags;
+
+/**
+ * Queue a memory fence to ensure correct
+ * ordering of memory operations to local memory
+ */
+#define CLK_LOCAL_MEM_FENCE    0x01
+
+/**
+ * Queue a memory fence to ensure correct
+ * ordering of memory operations to global memory
+ */
+#define CLK_GLOBAL_MEM_FENCE   0x02
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+/**
+ * Queue a memory fence to ensure correct ordering of memory
+ * operations between work-items of a work-group to
+ * image memory.
+ */
+#define CLK_IMAGE_MEM_FENCE  0x04
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * All work-items in a work-group executing the kernel
+ * on a processor must execute this function before any
+ * are allowed to continue execution beyond the barrier.
+ * This function must be encountered by all work-items in
+ * a work-group executing the kernel.
+ * If barrier is inside a conditional statement, then all
+ * work-items must enter the conditional if any work-item
+ * enters the conditional statement and executes the
+ * barrier.
+ * If barrier is inside a loop, all work-items must execute
+ * the barrier for each iteration of the loop before any are
+ * allowed to continue execution beyond the barrier.
+ * The barrier function also queues a memory fence
+ * (reads and writes) to ensure correct ordering of
+ * memory operations to local or global memory.
+ * The flags argument specifies the memory address space
+ * and can be set to a combination of the following literal
+ * values.
+ * CLK_LOCAL_MEM_FENCE - The barrier function
+ * will either flush any variables stored in local memory
+ * or queue a memory fence to ensure correct ordering of
+ * memory operations to local memory.
+ * CLK_GLOBAL_MEM_FENCE - The barrier function
+ * will queue a memory fence to ensure correct ordering
+ * of memory operations to global memory. This can be
+ * useful when work-items, for example, write to buffer or
+ * image objects and then want to read the updated data.
+ */
+
+void __ovld barrier(cl_mem_fence_flags flags);
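+
+// Illustrative sketch (hypothetical names): a local-memory tree reduction in
+// which every work-item executes barrier before reading a neighbour's value,
+// as required by the rules described above:
+//
+//   __kernel void reduce_sum(__global const float *in, __global float *out,
+//                            __local float *tmp) {
+//     size_t lid = get_local_id(0);
+//     tmp[lid] = in[get_global_id(0)];
+//     barrier(CLK_LOCAL_MEM_FENCE);        // all writes to tmp now visible
+//     for (size_t s = get_local_size(0) / 2; s > 0; s >>= 1) {
+//       if (lid < s)
+//         tmp[lid] += tmp[lid + s];
+//       barrier(CLK_LOCAL_MEM_FENCE);      // executed by every work-item
+//     }
+//     if (lid == 0)
+//       out[get_group_id(0)] = tmp[0];
+//   }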
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+typedef enum memory_scope
+{
+  memory_scope_work_item,
+  memory_scope_work_group,
+  memory_scope_device,
+  memory_scope_all_svm_devices,
+  memory_scope_sub_group
+} memory_scope;
+
+void __ovld work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
+void __ovld work_group_barrier(cl_mem_fence_flags flags);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
+
+/**
+ * Orders loads and stores of a work-item
+ * executing a kernel. This means that loads
+ * and stores preceding the mem_fence will
+ * be committed to memory before any loads
+ * and stores following the mem_fence.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld mem_fence(cl_mem_fence_flags flags);
+
+/**
+ * Read memory barrier that orders only
+ * loads.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld read_mem_fence(cl_mem_fence_flags flags);
+
+/**
+ * Write memory barrier that orders only
+ * stores.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld write_mem_fence(cl_mem_fence_flags flags);
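+
+// Illustrative sketch (hypothetical names): mem_fence orders only the calling
+// work-item's own memory operations; it does not synchronize work-items.
+// A producer can use it to keep a payload store ahead of a flag store:
+//
+//   __kernel void publish(__global int *data, __global volatile int *flag) {
+//     size_t i = get_global_id(0);
+//     data[i] = 42;                     // payload store
+//     mem_fence(CLK_GLOBAL_MEM_FENCE);  // payload committed before the flag
+//     flag[i] = 1;                      // flag store
+//   }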
+
+// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+cl_mem_fence_flags __ovld get_fence(const void *ptr);
+cl_mem_fence_flags __ovld get_fence(void *ptr);
+
+/** 
+ * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions
+ * and checked in Sema since they should be declared as
+ *   addr gentype* to_addr (gentype*);
+ * where gentype is a builtin type or a user-defined type.
+ */
+
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
+
+/**
+ * event_t async_work_group_copy (
+ * __global gentype *dst,
+ * const __local gentype *src,
+ * size_t num_elements,
+ * event_t event)
+ * Perform an async copy of num_elements
+ * gentype elements from src to dst. The async
+ * copy is performed by all work-items in a
+ * work-group, and this built-in function must
+ * therefore be encountered by all work-items in
+ * a work-group executing the kernel with the
+ * same argument values; otherwise the results
+ * are undefined.
+ * Returns an event object that can be used by
+ * wait_group_events to wait for the async copy
+ * to finish. The event argument can also be used
+ * to associate the async_work_group_copy with
+ * a previous async copy allowing an event to be
+ * shared by multiple async copies; otherwise event
+ * should be zero.
+ * If the event argument is non-zero, the event object
+ * supplied in the event argument will be returned.
+ * This function does not perform any implicit
+ * synchronization of source data such as using a
+ * barrier before performing the copy.
+ */
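+
+// Illustrative sketch (hypothetical names): every work-item in the group calls
+// the copy with identical arguments and then waits on the returned event with
+// wait_group_events before touching the staged data:
+//
+//   __kernel void stage(__global const float *src, __local float *tile,
+//                       uint count) {
+//     event_t e = async_work_group_copy(tile, src, count, 0);
+//     wait_group_events(1, &e);  // whole work-group waits for the copy
+//     // ... work on tile[0..count-1] ...
+//   }
+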
+event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short *dst, const __global short *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int *dst, const __global int *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint *dst, const __global uint *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long *dst, const __global long *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float *dst, const __global float *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char *dst, const __local char *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short *dst, const __local short *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int *dst, const __local int *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint *dst, const __local uint *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long *dst, const __local long *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float *dst, const __local float *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, event_t event);
+#ifdef cl_khr_fp64
+event_t __ovld async_work_group_copy(__local double *dst, const __global double *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double *dst, const __local double *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, event_t event);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+event_t __ovld async_work_group_copy(__local half *dst, const __global half *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half *dst, const __local half *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, event_t event);
+#endif //cl_khr_fp16
+
+/**
+ * Perform an async gather of num_elements
+ * gentype elements from src to dst. The
+ * src_stride is the stride in elements for each
+ * gentype element read from src. The dst_stride
+ * is the stride in elements for each gentype
+ * element written to dst. The async gather is
+ * performed by all work-items in a work-group.
+ * This built-in function must therefore be
+ * encountered by all work-items in a work-group
+ * executing the kernel with the same argument
+ * values; otherwise the results are undefined.
+ * Returns an event object that can be used by
+ * wait_group_events to wait for the async copy
+ * to finish. The event argument can also be used
+ * to associate the
+ * async_work_group_strided_copy with a
+ * previous async copy, allowing an event to be
+ * shared by multiple async copies; otherwise event
+ * should be zero.
+ * If the event argument is non-zero, the event
+ * object supplied in the event argument will be
+ * returned.
+ * This function does not perform any implicit
+ * synchronization of source data such as using a
+ * barrier before performing the copy.
+ */
+event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short *dst, const __global short *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int *dst, const __global int *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint *dst, const __global uint *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long *dst, const __global long *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float *dst, const __global float *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char *dst, const __local char *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short *dst, const __local short *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int *dst, const __local int *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint *dst, const __local uint *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long *dst, const __local long *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float *dst, const __local float *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, size_t dst_stride, event_t event);
+#ifdef cl_khr_fp64
+event_t __ovld async_work_group_strided_copy(__local double *dst, const __global double *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double *dst, const __local double *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, size_t dst_stride, event_t event);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+event_t __ovld async_work_group_strided_copy(__local half *dst, const __global half *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half *dst, const __local half *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, size_t dst_stride, event_t event);
+#endif //cl_khr_fp16
+
+/**
+ * Wait for events that identify the
+ * async_work_group_copy operations to
+ * complete. The event objects specified in
+ * event_list will be released after the wait is
+ * performed.
+ * This function must be encountered by all work-items
+ * in a work-group executing the kernel with
+ * the same num_events and event objects specified
+ * in event_list; otherwise the results are undefined.
+ */
+void __ovld wait_group_events(int num_events, event_t *event_list);
+
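+/*
+ * Illustrative usage sketch (not part of the declarations above; the kernel
+ * and parameter names below are hypothetical): gather every column_stride-th
+ * float from global memory into a local tile, then block on the returned
+ * event so every work-item sees the completed copy before reading the tile.
+ *
+ *   kernel void gather_column(global const float *src, local float *tile,
+ *                             uint tile_len, uint column_stride) {
+ *       event_t e = async_work_group_strided_copy(tile, src, tile_len,
+ *                                                 column_stride, 0);
+ *       wait_group_events(1, &e);
+ *       // All work-items in the work-group may now read tile[0..tile_len-1].
+ *   }
+ */
+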
+/**
+ * Prefetch num_elements * sizeof(gentype)
+ * bytes into the global cache. The prefetch
+ * instruction is applied to a work-item in a work-group
+ * and does not affect the functional
+ * behavior of the kernel.
+ */
+void __ovld prefetch(const __global char *p, size_t num_elements);
+void __ovld prefetch(const __global uchar *p, size_t num_elements);
+void __ovld prefetch(const __global short *p, size_t num_elements);
+void __ovld prefetch(const __global ushort *p, size_t num_elements);
+void __ovld prefetch(const __global int *p, size_t num_elements);
+void __ovld prefetch(const __global uint *p, size_t num_elements);
+void __ovld prefetch(const __global long *p, size_t num_elements);
+void __ovld prefetch(const __global ulong *p, size_t num_elements);
+void __ovld prefetch(const __global float *p, size_t num_elements);
+void __ovld prefetch(const __global char2 *p, size_t num_elements);
+void __ovld prefetch(const __global uchar2 *p, size_t num_elements);
+void __ovld prefetch(const __global short2 *p, size_t num_elements);
+void __ovld prefetch(const __global ushort2 *p, size_t num_elements);
+void __ovld prefetch(const __global int2 *p, size_t num_elements);
+void __ovld prefetch(const __global uint2 *p, size_t num_elements);
+void __ovld prefetch(const __global long2 *p, size_t num_elements);
+void __ovld prefetch(const __global ulong2 *p, size_t num_elements);
+void __ovld prefetch(const __global float2 *p, size_t num_elements);
+void __ovld prefetch(const __global char3 *p, size_t num_elements);
+void __ovld prefetch(const __global uchar3 *p, size_t num_elements);
+void __ovld prefetch(const __global short3 *p, size_t num_elements);
+void __ovld prefetch(const __global ushort3 *p, size_t num_elements);
+void __ovld prefetch(const __global int3 *p, size_t num_elements);
+void __ovld prefetch(const __global uint3 *p, size_t num_elements);
+void __ovld prefetch(const __global long3 *p, size_t num_elements);
+void __ovld prefetch(const __global ulong3 *p, size_t num_elements);
+void __ovld prefetch(const __global float3 *p, size_t num_elements);
+void __ovld prefetch(const __global char4 *p, size_t num_elements);
+void __ovld prefetch(const __global uchar4 *p, size_t num_elements);
+void __ovld prefetch(const __global short4 *p, size_t num_elements);
+void __ovld prefetch(const __global ushort4 *p, size_t num_elements);
+void __ovld prefetch(const __global int4 *p, size_t num_elements);
+void __ovld prefetch(const __global uint4 *p, size_t num_elements);
+void __ovld prefetch(const __global long4 *p, size_t num_elements);
+void __ovld prefetch(const __global ulong4 *p, size_t num_elements);
+void __ovld prefetch(const __global float4 *p, size_t num_elements);
+void __ovld prefetch(const __global char8 *p, size_t num_elements);
+void __ovld prefetch(const __global uchar8 *p, size_t num_elements);
+void __ovld prefetch(const __global short8 *p, size_t num_elements);
+void __ovld prefetch(const __global ushort8 *p, size_t num_elements);
+void __ovld prefetch(const __global int8 *p, size_t num_elements);
+void __ovld prefetch(const __global uint8 *p, size_t num_elements);
+void __ovld prefetch(const __global long8 *p, size_t num_elements);
+void __ovld prefetch(const __global ulong8 *p, size_t num_elements);
+void __ovld prefetch(const __global float8 *p, size_t num_elements);
+void __ovld prefetch(const __global char16 *p, size_t num_elements);
+void __ovld prefetch(const __global uchar16 *p, size_t num_elements);
+void __ovld prefetch(const __global short16 *p, size_t num_elements);
+void __ovld prefetch(const __global ushort16 *p, size_t num_elements);
+void __ovld prefetch(const __global int16 *p, size_t num_elements);
+void __ovld prefetch(const __global uint16 *p, size_t num_elements);
+void __ovld prefetch(const __global long16 *p, size_t num_elements);
+void __ovld prefetch(const __global ulong16 *p, size_t num_elements);
+void __ovld prefetch(const __global float16 *p, size_t num_elements);
+#ifdef cl_khr_fp64
+void __ovld prefetch(const __global double *p, size_t num_elements);
+void __ovld prefetch(const __global double2 *p, size_t num_elements);
+void __ovld prefetch(const __global double3 *p, size_t num_elements);
+void __ovld prefetch(const __global double4 *p, size_t num_elements);
+void __ovld prefetch(const __global double8 *p, size_t num_elements);
+void __ovld prefetch(const __global double16 *p, size_t num_elements);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld prefetch(const __global half *p, size_t num_elements);
+void __ovld prefetch(const __global half2 *p, size_t num_elements);
+void __ovld prefetch(const __global half3 *p, size_t num_elements);
+void __ovld prefetch(const __global half4 *p, size_t num_elements);
+void __ovld prefetch(const __global half8 *p, size_t num_elements);
+void __ovld prefetch(const __global half16 *p, size_t num_elements);
+#endif // cl_khr_fp16
+
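+/*
+ * Illustrative usage sketch (hypothetical kernel, not part of this header):
+ * issue a cache hint for data a work-item will consume later. prefetch has
+ * no functional effect, so the guard below is only about staying in bounds.
+ *
+ *   kernel void scale(global const float4 *in, global float4 *out, uint n) {
+ *       size_t i = get_global_id(0);
+ *       if (i + 64 < n)
+ *           prefetch(&in[i + 64], 1);   // hint: one float4, 64 items ahead
+ *       if (i < n)
+ *           out[i] = in[i] * 2.0f;
+ *   }
+ */
+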
+// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
+#endif
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at location pointed by p. Compute
+ * (old + val) and store result at location
+ * pointed by p. The function returns old.
+ */
+int __ovld atomic_add(volatile __global int *p, int val);
+unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_add(volatile __local int *p, int val);
+unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_add(volatile __global int *p, int val);
+unsigned int __ovld atom_add(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_add(volatile __local int *p, int val);
+unsigned int __ovld atom_add(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_add(volatile __global long *p, long val);
+unsigned long __ovld atom_add(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_add(volatile __local long *p, long val);
+unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long val);
+#endif
+
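+/*
+ * Illustrative usage sketch (hypothetical kernel, not part of this header):
+ * accumulate a global sum with atomic_add. The return value (the previous
+ * contents of *sum) is simply ignored here.
+ *
+ *   kernel void sum_positive(global const int *in, volatile global int *sum) {
+ *       int v = in[get_global_id(0)];
+ *       if (v > 0)
+ *           atomic_add(sum, v);
+ *   }
+ */
+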
+/**
+ * Read the 32-bit value (referred to as old) stored at location pointed by p.
+ * Compute (old - val) and store result at location pointed by p. The function
+ * returns old.
+ */
+int __ovld atomic_sub(volatile __global int *p, int val);
+unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_sub(volatile __local int *p, int val);
+unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_sub(volatile __global int *p, int val);
+unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_sub(volatile __local int *p, int val);
+unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_sub(volatile __global long *p, long val);
+unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_sub(volatile __local long *p, long val);
+unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val);
+#endif
+
+/**
+ * Swaps the old value stored at location p
+ * with new value given by val. Returns old
+ * value.
+ */
+int __ovld atomic_xchg(volatile __global int *p, int val);
+unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_xchg(volatile __local int *p, int val);
+unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val);
+float __ovld atomic_xchg(volatile __global float *p, float val);
+float __ovld atomic_xchg(volatile __local float *p, float val);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_xchg(volatile __global int *p, int val);
+unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_xchg(volatile __local int *p, int val);
+unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_xchg(volatile __global long *p, long val);
+long __ovld atom_xchg(volatile __local long *p, long val);
+unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val);
+unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at location pointed by p. Compute
+ * (old + 1) and store result at location
+ * pointed by p. The function returns old.
+ */
+int __ovld atomic_inc(volatile __global int *p);
+unsigned int __ovld atomic_inc(volatile __global unsigned int *p);
+int __ovld atomic_inc(volatile __local int *p);
+unsigned int __ovld atomic_inc(volatile __local unsigned int *p);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_inc(volatile __global int *p);
+unsigned int __ovld atom_inc(volatile __global unsigned int *p);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_inc(volatile __local int *p);
+unsigned int __ovld atom_inc(volatile __local unsigned int *p);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_inc(volatile __global long *p);
+unsigned long __ovld atom_inc(volatile __global unsigned long *p);
+long __ovld atom_inc(volatile __local long *p);
+unsigned long __ovld atom_inc(volatile __local unsigned long *p);
+#endif
+
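+/*
+ * Illustrative usage sketch (hypothetical kernel, not part of this header):
+ * use the value returned by atomic_inc (the counter's previous contents) as
+ * a unique slot when compacting non-zero elements into an output buffer.
+ *
+ *   kernel void compact(global const int *in, global int *out,
+ *                       volatile global uint *count, uint n) {
+ *       size_t i = get_global_id(0);
+ *       if (i < n && in[i] != 0) {
+ *           uint slot = atomic_inc(count);
+ *           out[slot] = in[i];
+ *       }
+ *   }
+ */
+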
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at location pointed by p. Compute
+ * (old - 1) and store result at location
+ * pointed by p. The function returns old.
+ */
+int __ovld atomic_dec(volatile __global int *p);
+unsigned int __ovld atomic_dec(volatile __global unsigned int *p);
+int __ovld atomic_dec(volatile __local int *p);
+unsigned int __ovld atomic_dec(volatile __local unsigned int *p);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_dec(volatile __global int *p);
+unsigned int __ovld atom_dec(volatile __global unsigned int *p);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_dec(volatile __local int *p);
+unsigned int __ovld atom_dec(volatile __local unsigned int *p);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_dec(volatile __global long *p);
+unsigned long __ovld atom_dec(volatile __global unsigned long *p);
+long __ovld atom_dec(volatile __local long *p);
+unsigned long __ovld atom_dec(volatile __local unsigned long *p);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at location pointed by p. Compute
+ * (old == cmp) ? val : old and store result at
+ * location pointed by p. The function
+ * returns old.
+ */
+int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val);
+unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
+int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val);
+unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
+unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val);
+unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val);
+unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val);
+long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val);
+unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val);
+#endif
+
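+/*
+ * Illustrative usage sketch (hypothetical helper, not part of this header):
+ * atomic_cmpxchg only covers integer types, but a compare-and-swap loop over
+ * a float's bit pattern gives a floating-point atomic add.
+ *
+ *   void atomic_add_float(volatile global float *p, float val) {
+ *       uint old_bits, new_bits;
+ *       do {
+ *           old_bits = as_uint(*p);
+ *           new_bits = as_uint(as_float(old_bits) + val);
+ *       } while (atomic_cmpxchg((volatile global uint *)p,
+ *                               old_bits, new_bits) != old_bits);
+ *   }
+ */
+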
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at location pointed by p. Compute
+ * min(old, val) and store minimum value at
+ * location pointed by p. The function
+ * returns old.
+ */
+int __ovld atomic_min(volatile __global int *p, int val);
+unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_min(volatile __local int *p, int val);
+unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_min(volatile __global int *p, int val);
+unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_min(volatile __local int *p, int val);
+unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_min(volatile __global long *p, long val);
+unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_min(volatile __local long *p, long val);
+unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at location pointed by p. Compute
+ * max(old, val) and store maximum value at
+ * location pointed by p. The function
+ * returns old.
+ */
+int __ovld atomic_max(volatile __global int *p, int val);
+unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_max(volatile __local int *p, int val);
+unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_max(volatile __global int *p, int val);
+unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_max(volatile __local int *p, int val);
+unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_max(volatile __global long *p, long val);
+unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_max(volatile __local long *p, long val);
+unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val);
+#endif
+
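+/*
+ * Illustrative usage sketch (hypothetical kernel, not part of this header):
+ * track a running maximum across all work-items with atomic_max; *best is
+ * assumed to be initialised to INT_MIN before the kernel is enqueued.
+ *
+ *   kernel void track_max(global const int *in, volatile global int *best) {
+ *       atomic_max(best, in[get_global_id(0)]);
+ *   }
+ */
+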
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at location pointed by p. Compute
+ * (old & val) and store result at location
+ * pointed by p. The function returns old.
+ */
+int __ovld atomic_and(volatile __global int *p, int val);
+unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_and(volatile __local int *p, int val);
+unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_and(volatile __global int *p, int val);
+unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_and(volatile __local int *p, int val);
+unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_and(volatile __global long *p, long val);
+unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_and(volatile __local long *p, long val);
+unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at location pointed by p. Compute
+ * (old | val) and store result at location
+ * pointed by p. The function returns old.
+ */
+int __ovld atomic_or(volatile __global int *p, int val);
+unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_or(volatile __local int *p, int val);
+unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_or(volatile __global int *p, int val);
+unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_or(volatile __local int *p, int val);
+unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_or(volatile __global long *p, long val);
+unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_or(volatile __local long *p, long val);
+unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at location pointed by p. Compute
+ * (old ^ val) and store result at location
+ * pointed by p. The function returns old.
+ */
+int __ovld atomic_xor(volatile __global int *p, int val);
+unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_xor(volatile __local int *p, int val);
+unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_xor(volatile __global int *p, int val);
+unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_xor(volatile __local int *p, int val);
+unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
+#endif
+
+// OpenCL v2.0 s6.13.11 - Atomic Functions
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#ifndef ATOMIC_VAR_INIT
+#define ATOMIC_VAR_INIT(x) (x)
+#endif //ATOMIC_VAR_INIT
+#define ATOMIC_FLAG_INIT 0
+
+// enum values aligned with what clang uses in EmitAtomicExpr()
+typedef enum memory_order
+{
+  memory_order_relaxed,
+  memory_order_acquire,
+  memory_order_release,
+  memory_order_acq_rel,
+  memory_order_seq_cst
+} memory_order;
+
+// double atomics support requires extensions cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
+#endif
+
+// atomic_init()
+void __ovld atomic_init(volatile atomic_int *object, int value);
+void __ovld atomic_init(volatile atomic_uint *object, uint value);
+void __ovld atomic_init(volatile atomic_float *object, float value);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+void __ovld atomic_init(volatile atomic_long *object, long value);
+void __ovld atomic_init(volatile atomic_ulong *object, ulong value);
+#ifdef cl_khr_fp64
+void __ovld atomic_init(volatile atomic_double *object, double value);
+#endif //cl_khr_fp64
+#endif
+
+// atomic_work_item_fence()
+void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope);
+
+// atomic_fetch()
+
+int __ovld atomic_fetch_add(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_or(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_and(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_min(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_min(volatile atomic_uint *object, int operand);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_max(volatile atomic_uint *object, int operand);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
+
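+/*
+ * Illustrative usage sketch (hypothetical kernel, not part of this header):
+ * a plain event counter needs no ordering guarantees, so the explicit form
+ * with memory_order_relaxed and device scope is sufficient and typically
+ * cheaper than the seq_cst default used by atomic_fetch_add.
+ *
+ *   kernel void count_nonzero(global const int *in,
+ *                             volatile global atomic_uint *counter) {
+ *       if (in[get_global_id(0)] != 0)
+ *           atomic_fetch_add_explicit(counter, 1u,
+ *                                     memory_order_relaxed,
+ *                                     memory_scope_device);
+ *   }
+ */
+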
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_or(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_and(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_min(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, long operand);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, long operand);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+
+// OpenCL v2.0 s6.13.11.7.5:
+// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.
+// or/xor/and/min/max: atomic type argument can be intptr_t/uintptr_t, value type argument can be intptr_t/uintptr_t.
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 
+uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
+
+uintptr_t __ovld atomic_fetch_or(volatile atomic_uintptr_t *object, intptr_t operand);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_xor(volatile atomic_uintptr_t *object, intptr_t operand);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_and(volatile atomic_uintptr_t *object, intptr_t operand);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_min(volatile atomic_uintptr_t *object, intptr_t operand);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_max(volatile atomic_uintptr_t *object, intptr_t operand);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
+
+intptr_t __ovld atomic_fetch_or(volatile atomic_intptr_t *object, uintptr_t operand);
+intptr_t __ovld atomic_fetch_or_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
+intptr_t __ovld atomic_fetch_or_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
+intptr_t __ovld atomic_fetch_xor(volatile atomic_intptr_t *object, uintptr_t operand);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
+intptr_t __ovld atomic_fetch_and(volatile atomic_intptr_t *object, uintptr_t operand);
+intptr_t __ovld atomic_fetch_and_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
+intptr_t __ovld atomic_fetch_and_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
+intptr_t __ovld atomic_fetch_min(volatile atomic_intptr_t *object, uintptr_t operand);
+intptr_t __ovld atomic_fetch_min_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
+intptr_t __ovld atomic_fetch_min_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
+intptr_t __ovld atomic_fetch_max(volatile atomic_intptr_t *object, uintptr_t operand);
+intptr_t __ovld atomic_fetch_max_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
+intptr_t __ovld atomic_fetch_max_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
+#endif
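+
+// Illustrative, non-normative sketch (not part of the specification text): with a
+// hypothetical 'volatile __global atomic_uintptr_t *cursor', the pointer-sized
+// overloads above admit calls such as
+//
+//   uintptr_t prev = atomic_fetch_add(cursor, (ptrdiff_t)sizeof(float));
+//   uintptr_t old  = atomic_fetch_or(cursor, (intptr_t)1);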
+
+// atomic_store()
+
+void __ovld atomic_store(volatile atomic_int *object, int desired);
+void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
+void __ovld atomic_store(volatile atomic_uint *object, uint desired);
+void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
+void __ovld atomic_store(volatile atomic_float *object, float desired);
+void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store(volatile atomic_double *object, double desired);
+void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+#endif //cl_khr_fp64
+void __ovld atomic_store(volatile atomic_long *object, long desired);
+void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
+void __ovld atomic_store(volatile atomic_ulong *object, ulong desired);
+void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
+#endif
+
+// atomic_load()
+
+int __ovld atomic_load(volatile atomic_int *object);
+int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order);
+int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope);
+uint __ovld atomic_load(volatile atomic_uint *object);
+uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order);
+uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope);
+float __ovld atomic_load(volatile atomic_float *object);
+float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order);
+float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load(volatile atomic_double *object);
+double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order);
+double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope);
+#endif //cl_khr_fp64
+long __ovld atomic_load(volatile atomic_long *object);
+long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order);
+long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope);
+ulong __ovld atomic_load(volatile atomic_ulong *object);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope);
+#endif
+
+// atomic_exchange()
+
+int __ovld atomic_exchange(volatile atomic_int *object, int desired);
+int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
+int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
+uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
+float __ovld atomic_exchange(volatile atomic_float *object, float desired);
+float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
+float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange(volatile atomic_double *object, double desired);
+double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
+double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange(volatile atomic_long *object, long desired);
+long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
+long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
+ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
+#endif
+
+// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
+
+bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
+                                                                                 int desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
+                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
+                                                                                 uint desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
+                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
+                                                                                 int desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
+                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
+                                                                                 uint desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
+                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
+                                                                                 float desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
+                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
+                                                                                 float desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
+                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
+                                                                                 double desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
+                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
+                                                                                 double desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
+                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
+                                                                                 long desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
+                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
+                                                                                 long desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
+                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
+                                                                                 ulong desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
+                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
+                                                                                 ulong desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
+                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
+#endif
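+
+// Illustrative, non-normative sketch of the usual retry loop built on the
+// declarations above ('counter' is a hypothetical 'volatile __global atomic_int *'):
+//
+//   int expected = atomic_load(counter);
+//   // On failure, 'expected' is updated with the value observed in *counter.
+//   while (!atomic_compare_exchange_weak(counter, &expected, expected + 1))
+//     ;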
+
+// atomic_flag_test_and_set() and atomic_flag_clear()
+
+bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object);
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
+void __ovld atomic_flag_clear(volatile atomic_flag *object);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
+
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
+
+/**
+ * The shuffle and shuffle2 built-in functions construct
+ * a permutation of elements from one or two input
+ * vectors respectively that are of the same type,
+ * returning a vector with the same element type as the
+ * input and length that is the same as the shuffle mask.
+ * The size of each element in the mask must match the
+ * size of each element in the result. For shuffle, only
+ * the ilogb(2m-1) least significant bits of each mask
+ * element are considered. For shuffle2, only the
+ * ilogb(2m-1)+1 least significant bits of each mask
+ * element are considered. Other bits in the mask shall
+ * be ignored.
+ * The elements of the input vectors are numbered from
+ * left to right across one or both of the vectors. For this
+ * purpose, the number of elements in a vector is given
+ * by vec_step(gentypem). The shuffle mask operand
+ * specifies, for each element of the result vector, which
+ * element of the one or two input vectors the result
+ * element gets.
+ * Examples:
+ * uint4 mask = (uint4)(3, 2, 1, 0);
+ * float4 a;
+ * float4 r = shuffle(a, mask);
+ * // r.s0123 = a.wzyx
+ *
+ * uint8 mask = (uint8)(0, 1, 2, 3, 4, 5, 6, 7);
+ * float4 a, b;
+ * float8 r = shuffle2(a, b, mask);
+ * // r.s0123 = a.xyzw
+ * // r.s4567 = b.xyzw
+ *
+ * uint4 mask;
+ * float8 a;
+ * float4 b;
+ * b = shuffle(a, mask); <- valid
+ *
+ * Examples that are not valid are:
+ * uint8 mask;
+ * short16 a;
+ * short8 b;
+ * b = shuffle(a, mask); <- not valid
+ */
+char2 __ovld __cnfn shuffle(char2 x, uchar2 mask);
+char2 __ovld __cnfn shuffle(char4 x, uchar2 mask);
+char2 __ovld __cnfn shuffle(char8 x, uchar2 mask);
+char2 __ovld __cnfn shuffle(char16 x, uchar2 mask);
+
+uchar2 __ovld __cnfn shuffle(uchar2 x, uchar2 mask);
+uchar2 __ovld __cnfn shuffle(uchar4 x, uchar2 mask);
+uchar2 __ovld __cnfn shuffle(uchar8 x, uchar2 mask);
+uchar2 __ovld __cnfn shuffle(uchar16 x, uchar2 mask);
+
+short2 __ovld __cnfn shuffle(short2 x, ushort2 mask);
+short2 __ovld __cnfn shuffle(short4 x, ushort2 mask);
+short2 __ovld __cnfn shuffle(short8 x, ushort2 mask);
+short2 __ovld __cnfn shuffle(short16 x, ushort2 mask);
+
+ushort2 __ovld __cnfn shuffle(ushort2 x, ushort2 mask);
+ushort2 __ovld __cnfn shuffle(ushort4 x, ushort2 mask);
+ushort2 __ovld __cnfn shuffle(ushort8 x, ushort2 mask);
+ushort2 __ovld __cnfn shuffle(ushort16 x, ushort2 mask);
+
+int2 __ovld __cnfn shuffle(int2 x, uint2 mask);
+int2 __ovld __cnfn shuffle(int4 x, uint2 mask);
+int2 __ovld __cnfn shuffle(int8 x, uint2 mask);
+int2 __ovld __cnfn shuffle(int16 x, uint2 mask);
+
+uint2 __ovld __cnfn shuffle(uint2 x, uint2 mask);
+uint2 __ovld __cnfn shuffle(uint4 x, uint2 mask);
+uint2 __ovld __cnfn shuffle(uint8 x, uint2 mask);
+uint2 __ovld __cnfn shuffle(uint16 x, uint2 mask);
+
+long2 __ovld __cnfn shuffle(long2 x, ulong2 mask);
+long2 __ovld __cnfn shuffle(long4 x, ulong2 mask);
+long2 __ovld __cnfn shuffle(long8 x, ulong2 mask);
+long2 __ovld __cnfn shuffle(long16 x, ulong2 mask);
+
+ulong2 __ovld __cnfn shuffle(ulong2 x, ulong2 mask);
+ulong2 __ovld __cnfn shuffle(ulong4 x, ulong2 mask);
+ulong2 __ovld __cnfn shuffle(ulong8 x, ulong2 mask);
+ulong2 __ovld __cnfn shuffle(ulong16 x, ulong2 mask);
+
+float2 __ovld __cnfn shuffle(float2 x, uint2 mask);
+float2 __ovld __cnfn shuffle(float4 x, uint2 mask);
+float2 __ovld __cnfn shuffle(float8 x, uint2 mask);
+float2 __ovld __cnfn shuffle(float16 x, uint2 mask);
+
+char4 __ovld __cnfn shuffle(char2 x, uchar4 mask);
+char4 __ovld __cnfn shuffle(char4 x, uchar4 mask);
+char4 __ovld __cnfn shuffle(char8 x, uchar4 mask);
+char4 __ovld __cnfn shuffle(char16 x, uchar4 mask);
+
+uchar4 __ovld __cnfn shuffle(uchar2 x, uchar4 mask);
+uchar4 __ovld __cnfn shuffle(uchar4 x, uchar4 mask);
+uchar4 __ovld __cnfn shuffle(uchar8 x, uchar4 mask);
+uchar4 __ovld __cnfn shuffle(uchar16 x, uchar4 mask);
+
+short4 __ovld __cnfn shuffle(short2 x, ushort4 mask);
+short4 __ovld __cnfn shuffle(short4 x, ushort4 mask);
+short4 __ovld __cnfn shuffle(short8 x, ushort4 mask);
+short4 __ovld __cnfn shuffle(short16 x, ushort4 mask);
+
+ushort4 __ovld __cnfn shuffle(ushort2 x, ushort4 mask);
+ushort4 __ovld __cnfn shuffle(ushort4 x, ushort4 mask);
+ushort4 __ovld __cnfn shuffle(ushort8 x, ushort4 mask);
+ushort4 __ovld __cnfn shuffle(ushort16 x, ushort4 mask);
+
+int4 __ovld __cnfn shuffle(int2 x, uint4 mask);
+int4 __ovld __cnfn shuffle(int4 x, uint4 mask);
+int4 __ovld __cnfn shuffle(int8 x, uint4 mask);
+int4 __ovld __cnfn shuffle(int16 x, uint4 mask);
+
+uint4 __ovld __cnfn shuffle(uint2 x, uint4 mask);
+uint4 __ovld __cnfn shuffle(uint4 x, uint4 mask);
+uint4 __ovld __cnfn shuffle(uint8 x, uint4 mask);
+uint4 __ovld __cnfn shuffle(uint16 x, uint4 mask);
+
+long4 __ovld __cnfn shuffle(long2 x, ulong4 mask);
+long4 __ovld __cnfn shuffle(long4 x, ulong4 mask);
+long4 __ovld __cnfn shuffle(long8 x, ulong4 mask);
+long4 __ovld __cnfn shuffle(long16 x, ulong4 mask);
+
+ulong4 __ovld __cnfn shuffle(ulong2 x, ulong4 mask);
+ulong4 __ovld __cnfn shuffle(ulong4 x, ulong4 mask);
+ulong4 __ovld __cnfn shuffle(ulong8 x, ulong4 mask);
+ulong4 __ovld __cnfn shuffle(ulong16 x, ulong4 mask);
+
+float4 __ovld __cnfn shuffle(float2 x, uint4 mask);
+float4 __ovld __cnfn shuffle(float4 x, uint4 mask);
+float4 __ovld __cnfn shuffle(float8 x, uint4 mask);
+float4 __ovld __cnfn shuffle(float16 x, uint4 mask);
+
+char8 __ovld __cnfn shuffle(char2 x, uchar8 mask);
+char8 __ovld __cnfn shuffle(char4 x, uchar8 mask);
+char8 __ovld __cnfn shuffle(char8 x, uchar8 mask);
+char8 __ovld __cnfn shuffle(char16 x, uchar8 mask);
+
+uchar8 __ovld __cnfn shuffle(uchar2 x, uchar8 mask);
+uchar8 __ovld __cnfn shuffle(uchar4 x, uchar8 mask);
+uchar8 __ovld __cnfn shuffle(uchar8 x, uchar8 mask);
+uchar8 __ovld __cnfn shuffle(uchar16 x, uchar8 mask);
+
+short8 __ovld __cnfn shuffle(short2 x, ushort8 mask);
+short8 __ovld __cnfn shuffle(short4 x, ushort8 mask);
+short8 __ovld __cnfn shuffle(short8 x, ushort8 mask);
+short8 __ovld __cnfn shuffle(short16 x, ushort8 mask);
+
+ushort8 __ovld __cnfn shuffle(ushort2 x, ushort8 mask);
+ushort8 __ovld __cnfn shuffle(ushort4 x, ushort8 mask);
+ushort8 __ovld __cnfn shuffle(ushort8 x, ushort8 mask);
+ushort8 __ovld __cnfn shuffle(ushort16 x, ushort8 mask);
+
+int8 __ovld __cnfn shuffle(int2 x, uint8 mask);
+int8 __ovld __cnfn shuffle(int4 x, uint8 mask);
+int8 __ovld __cnfn shuffle(int8 x, uint8 mask);
+int8 __ovld __cnfn shuffle(int16 x, uint8 mask);
+
+uint8 __ovld __cnfn shuffle(uint2 x, uint8 mask);
+uint8 __ovld __cnfn shuffle(uint4 x, uint8 mask);
+uint8 __ovld __cnfn shuffle(uint8 x, uint8 mask);
+uint8 __ovld __cnfn shuffle(uint16 x, uint8 mask);
+
+long8 __ovld __cnfn shuffle(long2 x, ulong8 mask);
+long8 __ovld __cnfn shuffle(long4 x, ulong8 mask);
+long8 __ovld __cnfn shuffle(long8 x, ulong8 mask);
+long8 __ovld __cnfn shuffle(long16 x, ulong8 mask);
+
+ulong8 __ovld __cnfn shuffle(ulong2 x, ulong8 mask);
+ulong8 __ovld __cnfn shuffle(ulong4 x, ulong8 mask);
+ulong8 __ovld __cnfn shuffle(ulong8 x, ulong8 mask);
+ulong8 __ovld __cnfn shuffle(ulong16 x, ulong8 mask);
+
+float8 __ovld __cnfn shuffle(float2 x, uint8 mask);
+float8 __ovld __cnfn shuffle(float4 x, uint8 mask);
+float8 __ovld __cnfn shuffle(float8 x, uint8 mask);
+float8 __ovld __cnfn shuffle(float16 x, uint8 mask);
+
+char16 __ovld __cnfn shuffle(char2 x, uchar16 mask);
+char16 __ovld __cnfn shuffle(char4 x, uchar16 mask);
+char16 __ovld __cnfn shuffle(char8 x, uchar16 mask);
+char16 __ovld __cnfn shuffle(char16 x, uchar16 mask);
+
+uchar16 __ovld __cnfn shuffle(uchar2 x, uchar16 mask);
+uchar16 __ovld __cnfn shuffle(uchar4 x, uchar16 mask);
+uchar16 __ovld __cnfn shuffle(uchar8 x, uchar16 mask);
+uchar16 __ovld __cnfn shuffle(uchar16 x, uchar16 mask);
+
+short16 __ovld __cnfn shuffle(short2 x, ushort16 mask);
+short16 __ovld __cnfn shuffle(short4 x, ushort16 mask);
+short16 __ovld __cnfn shuffle(short8 x, ushort16 mask);
+short16 __ovld __cnfn shuffle(short16 x, ushort16 mask);
+
+ushort16 __ovld __cnfn shuffle(ushort2 x, ushort16 mask);
+ushort16 __ovld __cnfn shuffle(ushort4 x, ushort16 mask);
+ushort16 __ovld __cnfn shuffle(ushort8 x, ushort16 mask);
+ushort16 __ovld __cnfn shuffle(ushort16 x, ushort16 mask);
+
+int16 __ovld __cnfn shuffle(int2 x, uint16 mask);
+int16 __ovld __cnfn shuffle(int4 x, uint16 mask);
+int16 __ovld __cnfn shuffle(int8 x, uint16 mask);
+int16 __ovld __cnfn shuffle(int16 x, uint16 mask);
+
+uint16 __ovld __cnfn shuffle(uint2 x, uint16 mask);
+uint16 __ovld __cnfn shuffle(uint4 x, uint16 mask);
+uint16 __ovld __cnfn shuffle(uint8 x, uint16 mask);
+uint16 __ovld __cnfn shuffle(uint16 x, uint16 mask);
+
+long16 __ovld __cnfn shuffle(long2 x, ulong16 mask);
+long16 __ovld __cnfn shuffle(long4 x, ulong16 mask);
+long16 __ovld __cnfn shuffle(long8 x, ulong16 mask);
+long16 __ovld __cnfn shuffle(long16 x, ulong16 mask);
+
+ulong16 __ovld __cnfn shuffle(ulong2 x, ulong16 mask);
+ulong16 __ovld __cnfn shuffle(ulong4 x, ulong16 mask);
+ulong16 __ovld __cnfn shuffle(ulong8 x, ulong16 mask);
+ulong16 __ovld __cnfn shuffle(ulong16 x, ulong16 mask);
+
+float16 __ovld __cnfn shuffle(float2 x, uint16 mask);
+float16 __ovld __cnfn shuffle(float4 x, uint16 mask);
+float16 __ovld __cnfn shuffle(float8 x, uint16 mask);
+float16 __ovld __cnfn shuffle(float16 x, uint16 mask);
+
+#ifdef cl_khr_fp64
+double2 __ovld __cnfn shuffle(double2 x, ulong2 mask);
+double2 __ovld __cnfn shuffle(double4 x, ulong2 mask);
+double2 __ovld __cnfn shuffle(double8 x, ulong2 mask);
+double2 __ovld __cnfn shuffle(double16 x, ulong2 mask);
+
+double4 __ovld __cnfn shuffle(double2 x, ulong4 mask);
+double4 __ovld __cnfn shuffle(double4 x, ulong4 mask);
+double4 __ovld __cnfn shuffle(double8 x, ulong4 mask);
+double4 __ovld __cnfn shuffle(double16 x, ulong4 mask);
+
+double8 __ovld __cnfn shuffle(double2 x, ulong8 mask);
+double8 __ovld __cnfn shuffle(double4 x, ulong8 mask);
+double8 __ovld __cnfn shuffle(double8 x, ulong8 mask);
+double8 __ovld __cnfn shuffle(double16 x, ulong8 mask);
+
+double16 __ovld __cnfn shuffle(double2 x, ulong16 mask);
+double16 __ovld __cnfn shuffle(double4 x, ulong16 mask);
+double16 __ovld __cnfn shuffle(double8 x, ulong16 mask);
+double16 __ovld __cnfn shuffle(double16 x, ulong16 mask);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __cnfn shuffle(half2 x, ushort2 mask);
+half2 __ovld __cnfn shuffle(half4 x, ushort2 mask);
+half2 __ovld __cnfn shuffle(half8 x, ushort2 mask);
+half2 __ovld __cnfn shuffle(half16 x, ushort2 mask);
+
+half4 __ovld __cnfn shuffle(half2 x, ushort4 mask);
+half4 __ovld __cnfn shuffle(half4 x, ushort4 mask);
+half4 __ovld __cnfn shuffle(half8 x, ushort4 mask);
+half4 __ovld __cnfn shuffle(half16 x, ushort4 mask);
+
+half8 __ovld __cnfn shuffle(half2 x, ushort8 mask);
+half8 __ovld __cnfn shuffle(half4 x, ushort8 mask);
+half8 __ovld __cnfn shuffle(half8 x, ushort8 mask);
+half8 __ovld __cnfn shuffle(half16 x, ushort8 mask);
+
+half16 __ovld __cnfn shuffle(half2 x, ushort16 mask);
+half16 __ovld __cnfn shuffle(half4 x, ushort16 mask);
+half16 __ovld __cnfn shuffle(half8 x, ushort16 mask);
+half16 __ovld __cnfn shuffle(half16 x, ushort16 mask);
+#endif //cl_khr_fp16
+
+char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask);
+char2 __ovld __cnfn shuffle2(char4 x, char4 y, uchar2 mask);
+char2 __ovld __cnfn shuffle2(char8 x, char8 y, uchar2 mask);
+char2 __ovld __cnfn shuffle2(char16 x, char16 y, uchar2 mask);
+
+uchar2 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar2 mask);
+uchar2 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar2 mask);
+uchar2 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar2 mask);
+uchar2 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar2 mask);
+
+short2 __ovld __cnfn shuffle2(short2 x, short2 y, ushort2 mask);
+short2 __ovld __cnfn shuffle2(short4 x, short4 y, ushort2 mask);
+short2 __ovld __cnfn shuffle2(short8 x, short8 y, ushort2 mask);
+short2 __ovld __cnfn shuffle2(short16 x, short16 y, ushort2 mask);
+
+ushort2 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort2 mask);
+ushort2 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort2 mask);
+ushort2 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort2 mask);
+ushort2 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort2 mask);
+
+int2 __ovld __cnfn shuffle2(int2 x, int2 y, uint2 mask);
+int2 __ovld __cnfn shuffle2(int4 x, int4 y, uint2 mask);
+int2 __ovld __cnfn shuffle2(int8 x, int8 y, uint2 mask);
+int2 __ovld __cnfn shuffle2(int16 x, int16 y, uint2 mask);
+
+uint2 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint2 mask);
+uint2 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint2 mask);
+uint2 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint2 mask);
+uint2 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint2 mask);
+
+long2 __ovld __cnfn shuffle2(long2 x, long2 y, ulong2 mask);
+long2 __ovld __cnfn shuffle2(long4 x, long4 y, ulong2 mask);
+long2 __ovld __cnfn shuffle2(long8 x, long8 y, ulong2 mask);
+long2 __ovld __cnfn shuffle2(long16 x, long16 y, ulong2 mask);
+
+ulong2 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong2 mask);
+ulong2 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong2 mask);
+ulong2 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong2 mask);
+ulong2 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong2 mask);
+
+float2 __ovld __cnfn shuffle2(float2 x, float2 y, uint2 mask);
+float2 __ovld __cnfn shuffle2(float4 x, float4 y, uint2 mask);
+float2 __ovld __cnfn shuffle2(float8 x, float8 y, uint2 mask);
+float2 __ovld __cnfn shuffle2(float16 x, float16 y, uint2 mask);
+
+char4 __ovld __cnfn shuffle2(char2 x, char2 y, uchar4 mask);
+char4 __ovld __cnfn shuffle2(char4 x, char4 y, uchar4 mask);
+char4 __ovld __cnfn shuffle2(char8 x, char8 y, uchar4 mask);
+char4 __ovld __cnfn shuffle2(char16 x, char16 y, uchar4 mask);
+
+uchar4 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar4 mask);
+uchar4 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar4 mask);
+uchar4 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar4 mask);
+uchar4 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar4 mask);
+
+short4 __ovld __cnfn shuffle2(short2 x, short2 y, ushort4 mask);
+short4 __ovld __cnfn shuffle2(short4 x, short4 y, ushort4 mask);
+short4 __ovld __cnfn shuffle2(short8 x, short8 y, ushort4 mask);
+short4 __ovld __cnfn shuffle2(short16 x, short16 y, ushort4 mask);
+
+ushort4 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort4 mask);
+ushort4 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort4 mask);
+ushort4 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort4 mask);
+ushort4 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort4 mask);
+
+int4 __ovld __cnfn shuffle2(int2 x, int2 y, uint4 mask);
+int4 __ovld __cnfn shuffle2(int4 x, int4 y, uint4 mask);
+int4 __ovld __cnfn shuffle2(int8 x, int8 y, uint4 mask);
+int4 __ovld __cnfn shuffle2(int16 x, int16 y, uint4 mask);
+
+uint4 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint4 mask);
+uint4 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint4 mask);
+uint4 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint4 mask);
+uint4 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint4 mask);
+
+long4 __ovld __cnfn shuffle2(long2 x, long2 y, ulong4 mask);
+long4 __ovld __cnfn shuffle2(long4 x, long4 y, ulong4 mask);
+long4 __ovld __cnfn shuffle2(long8 x, long8 y, ulong4 mask);
+long4 __ovld __cnfn shuffle2(long16 x, long16 y, ulong4 mask);
+
+ulong4 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong4 mask);
+ulong4 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong4 mask);
+ulong4 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong4 mask);
+ulong4 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong4 mask);
+
+float4 __ovld __cnfn shuffle2(float2 x, float2 y, uint4 mask);
+float4 __ovld __cnfn shuffle2(float4 x, float4 y, uint4 mask);
+float4 __ovld __cnfn shuffle2(float8 x, float8 y, uint4 mask);
+float4 __ovld __cnfn shuffle2(float16 x, float16 y, uint4 mask);
+
+char8 __ovld __cnfn shuffle2(char2 x, char2 y, uchar8 mask);
+char8 __ovld __cnfn shuffle2(char4 x, char4 y, uchar8 mask);
+char8 __ovld __cnfn shuffle2(char8 x, char8 y, uchar8 mask);
+char8 __ovld __cnfn shuffle2(char16 x, char16 y, uchar8 mask);
+
+uchar8 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar8 mask);
+uchar8 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar8 mask);
+uchar8 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar8 mask);
+uchar8 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar8 mask);
+
+short8 __ovld __cnfn shuffle2(short2 x, short2 y, ushort8 mask);
+short8 __ovld __cnfn shuffle2(short4 x, short4 y, ushort8 mask);
+short8 __ovld __cnfn shuffle2(short8 x, short8 y, ushort8 mask);
+short8 __ovld __cnfn shuffle2(short16 x, short16 y, ushort8 mask);
+
+ushort8 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort8 mask);
+ushort8 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort8 mask);
+ushort8 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort8 mask);
+ushort8 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort8 mask);
+
+int8 __ovld __cnfn shuffle2(int2 x, int2 y, uint8 mask);
+int8 __ovld __cnfn shuffle2(int4 x, int4 y, uint8 mask);
+int8 __ovld __cnfn shuffle2(int8 x, int8 y, uint8 mask);
+int8 __ovld __cnfn shuffle2(int16 x, int16 y, uint8 mask);
+
+uint8 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint8 mask);
+uint8 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint8 mask);
+uint8 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint8 mask);
+uint8 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint8 mask);
+
+long8 __ovld __cnfn shuffle2(long2 x, long2 y, ulong8 mask);
+long8 __ovld __cnfn shuffle2(long4 x, long4 y, ulong8 mask);
+long8 __ovld __cnfn shuffle2(long8 x, long8 y, ulong8 mask);
+long8 __ovld __cnfn shuffle2(long16 x, long16 y, ulong8 mask);
+
+ulong8 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong8 mask);
+ulong8 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong8 mask);
+ulong8 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong8 mask);
+ulong8 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong8 mask);
+
+float8 __ovld __cnfn shuffle2(float2 x, float2 y, uint8 mask);
+float8 __ovld __cnfn shuffle2(float4 x, float4 y, uint8 mask);
+float8 __ovld __cnfn shuffle2(float8 x, float8 y, uint8 mask);
+float8 __ovld __cnfn shuffle2(float16 x, float16 y, uint8 mask);
+
+char16 __ovld __cnfn shuffle2(char2 x, char2 y, uchar16 mask);
+char16 __ovld __cnfn shuffle2(char4 x, char4 y, uchar16 mask);
+char16 __ovld __cnfn shuffle2(char8 x, char8 y, uchar16 mask);
+char16 __ovld __cnfn shuffle2(char16 x, char16 y, uchar16 mask);
+
+uchar16 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar16 mask);
+uchar16 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar16 mask);
+uchar16 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar16 mask);
+uchar16 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar16 mask);
+
+short16 __ovld __cnfn shuffle2(short2 x, short2 y, ushort16 mask);
+short16 __ovld __cnfn shuffle2(short4 x, short4 y, ushort16 mask);
+short16 __ovld __cnfn shuffle2(short8 x, short8 y, ushort16 mask);
+short16 __ovld __cnfn shuffle2(short16 x, short16 y, ushort16 mask);
+
+ushort16 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort16 mask);
+ushort16 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort16 mask);
+ushort16 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort16 mask);
+ushort16 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort16 mask);
+
+int16 __ovld __cnfn shuffle2(int2 x, int2 y, uint16 mask);
+int16 __ovld __cnfn shuffle2(int4 x, int4 y, uint16 mask);
+int16 __ovld __cnfn shuffle2(int8 x, int8 y, uint16 mask);
+int16 __ovld __cnfn shuffle2(int16 x, int16 y, uint16 mask);
+
+uint16 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint16 mask);
+uint16 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint16 mask);
+uint16 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint16 mask);
+uint16 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint16 mask);
+
+long16 __ovld __cnfn shuffle2(long2 x, long2 y, ulong16 mask);
+long16 __ovld __cnfn shuffle2(long4 x, long4 y, ulong16 mask);
+long16 __ovld __cnfn shuffle2(long8 x, long8 y, ulong16 mask);
+long16 __ovld __cnfn shuffle2(long16 x, long16 y, ulong16 mask);
+
+ulong16 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong16 mask);
+ulong16 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong16 mask);
+ulong16 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong16 mask);
+ulong16 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong16 mask);
+
+float16 __ovld __cnfn shuffle2(float2 x, float2 y, uint16 mask);
+float16 __ovld __cnfn shuffle2(float4 x, float4 y, uint16 mask);
+float16 __ovld __cnfn shuffle2(float8 x, float8 y, uint16 mask);
+float16 __ovld __cnfn shuffle2(float16 x, float16 y, uint16 mask);
+
+#ifdef cl_khr_fp64
+double2 __ovld __cnfn shuffle2(double2 x, double2 y, ulong2 mask);
+double2 __ovld __cnfn shuffle2(double4 x, double4 y, ulong2 mask);
+double2 __ovld __cnfn shuffle2(double8 x, double8 y, ulong2 mask);
+double2 __ovld __cnfn shuffle2(double16 x, double16 y, ulong2 mask);
+
+double4 __ovld __cnfn shuffle2(double2 x, double2 y, ulong4 mask);
+double4 __ovld __cnfn shuffle2(double4 x, double4 y, ulong4 mask);
+double4 __ovld __cnfn shuffle2(double8 x, double8 y, ulong4 mask);
+double4 __ovld __cnfn shuffle2(double16 x, double16 y, ulong4 mask);
+
+double8 __ovld __cnfn shuffle2(double2 x, double2 y, ulong8 mask);
+double8 __ovld __cnfn shuffle2(double4 x, double4 y, ulong8 mask);
+double8 __ovld __cnfn shuffle2(double8 x, double8 y, ulong8 mask);
+double8 __ovld __cnfn shuffle2(double16 x, double16 y, ulong8 mask);
+
+double16 __ovld __cnfn shuffle2(double2 x, double2 y, ulong16 mask);
+double16 __ovld __cnfn shuffle2(double4 x, double4 y, ulong16 mask);
+double16 __ovld __cnfn shuffle2(double8 x, double8 y, ulong16 mask);
+double16 __ovld __cnfn shuffle2(double16 x, double16 y, ulong16 mask);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __cnfn shuffle2(half2 x, half2 y, ushort2 mask);
+half2 __ovld __cnfn shuffle2(half4 x, half4 y, ushort2 mask);
+half2 __ovld __cnfn shuffle2(half8 x, half8 y, ushort2 mask);
+half2 __ovld __cnfn shuffle2(half16 x, half16 y, ushort2 mask);
+
+half4 __ovld __cnfn shuffle2(half2 x, half2 y, ushort4 mask);
+half4 __ovld __cnfn shuffle2(half4 x, half4 y, ushort4 mask);
+half4 __ovld __cnfn shuffle2(half8 x, half8 y, ushort4 mask);
+half4 __ovld __cnfn shuffle2(half16 x, half16 y, ushort4 mask);
+
+half8 __ovld __cnfn shuffle2(half2 x, half2 y, ushort8 mask);
+half8 __ovld __cnfn shuffle2(half4 x, half4 y, ushort8 mask);
+half8 __ovld __cnfn shuffle2(half8 x, half8 y, ushort8 mask);
+half8 __ovld __cnfn shuffle2(half16 x, half16 y, ushort8 mask);
+
+half16 __ovld __cnfn shuffle2(half2 x, half2 y, ushort16 mask);
+half16 __ovld __cnfn shuffle2(half4 x, half4 y, ushort16 mask);
+half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
+half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
+#endif //cl_khr_fp16
+
+// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
+
+int printf(__constant const char* st, ...);
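+
+// Illustrative, non-normative usage; a string literal already satisfies the
+// __constant requirement on the format argument:
+//
+//   printf("gid = %u\n", (uint)get_global_id(0));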
+
+// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
+
+// These values need to match the runtime equivalent
+//
+// Addressing Mode.
+//
+#define CLK_ADDRESS_NONE                0
+#define CLK_ADDRESS_CLAMP_TO_EDGE       2
+#define CLK_ADDRESS_CLAMP               4
+#define CLK_ADDRESS_REPEAT              6
+#define CLK_ADDRESS_MIRRORED_REPEAT     8
+
+//
+// Coordinate Normalization
+//
+#define CLK_NORMALIZED_COORDS_FALSE     0
+#define CLK_NORMALIZED_COORDS_TRUE      1
+
+//
+// Filtering Mode.
+//
+#define CLK_FILTER_NEAREST              0x10
+#define CLK_FILTER_LINEAR               0x20
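+
+// Illustrative, non-normative example of combining these literals into a sampler
+// declared in program source (a common usage pattern, not mandated by this header):
+//
+//   __constant sampler_t smp = CLK_NORMALIZED_COORDS_FALSE |
+//                              CLK_ADDRESS_CLAMP_TO_EDGE |
+//                              CLK_FILTER_NEAREST;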
+
+/**
+ * Use the coordinate (coord.xy) to do an element lookup in
+ * the 2D image object specified by image.
+ *
+ * Use the coordinate (coord.x, coord.y, coord.z) to do
+ * an element lookup in the 3D image object specified
+ * by image. coord.w is ignored.
+ *
+ * Use the coordinate (coord.z) to index into the
+ * 2D image array object specified by image_array
+ * and (coord.x, coord.y) to do an element lookup in
+ * the 2D image object specified by image.
+ *
+ * Use the coordinate (x) to do an element lookup in
+ * the 1D image object specified by image.
+ *
+ * Use the coordinate (coord.y) to index into the
+ * 1D image array object specified by image_array
+ * and (coord.x) to do an element lookup in
+ * the 1D image object specified by image.
+ *
+ * Use the coordinate (coord.xy) and sample to do an
+ * element lookup in the 2D multi-sample image specified
+ * by image.
+ *
+ * Use coord.xy and sample to do an element
+ * lookup in the 2D multi-sample image layer
+ * identified by index coord.z in the 2D multi-sample
+ * image array specified by image.
+ *
+ * For mipmap images, use the mip-level specified by
+ * the Level-of-Detail (lod) or use gradients for LOD
+ * computation.
+ *
+ * read_imagef returns floating-point values in the
+ * range [0.0 ... 1.0] for image objects created with
+ * image_channel_data_type set to one of the predefined
+ * packed formats or CL_UNORM_INT8, or
+ * CL_UNORM_INT16.
+ *
+ * read_imagef returns floating-point values in the
+ * range [-1.0 ... 1.0] for image objects created with
+ * image_channel_data_type set to CL_SNORM_INT8,
+ * or CL_SNORM_INT16.
+ *
+ * read_imagef returns floating-point values for image
+ * objects created with image_channel_data_type set to
+ * CL_HALF_FLOAT or CL_FLOAT.
+ *
+ * read_imagei and read_imageui return
+ * unnormalized signed integer and unsigned integer
+ * values respectively. Each channel will be stored in a
+ * 32-bit integer.
+ *
+ * read_imagei can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_SIGNED_INT8,
+ * CL_SIGNED_INT16 and
+ * CL_SIGNED_INT32.
+ * If the image_channel_data_type is not one of the
+ * above values, the values returned by read_imagei
+ * are undefined.
+ *
+ * read_imageui can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_UNSIGNED_INT8,
+ * CL_UNSIGNED_INT16 and
+ * CL_UNSIGNED_INT32.
+ * If the image_channel_data_type is not one of the
+ * above values, the values returned by read_imageui
+ * are undefined.
+ *
+ * The read_image{i|ui} calls support a nearest filter
+ * only. The filter_mode specified in sampler
+ * must be set to CLK_FILTER_NEAREST; otherwise
+ * the values returned are undefined.
+ *
+ * The read_image{f|i|ui} calls that take
+ * integer coordinates must use a sampler with
+ * normalized coordinates set to
+ * CLK_NORMALIZED_COORDS_FALSE and
+ * addressing mode set to
+ * CLK_ADDRESS_CLAMP_TO_EDGE,
+ * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE;
+ * otherwise the values returned are undefined.
+ *
+ * Values returned by read_imagef for image objects
+ * with image_channel_data_type values not specified
+ * in the description above are undefined.
+ */
+
+float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord);
+float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord);
+
+int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord);
+int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord);
+
+float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, int4 coord);
+float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord);
+
+int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, int4 coord);
+int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord);
+uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord);
+uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord);
+
+float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
+float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+
+int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
+int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+
+float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord);
+float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord);
+
+int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, int coord);
+int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord);
+
+float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
+float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
+
+int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
+int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
+
+#ifdef cl_khr_depth_images
+float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord);
+float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, int2 coord);
+
+float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord);
+float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, int4 coord);
+#endif //cl_khr_depth_images
+
+#if defined(cl_khr_gl_msaa_sharing)
+float4 __purefn __ovld read_imagef(read_only image2d_msaa_t image, int2 coord, int sample);
+int4 __purefn __ovld read_imagei(read_only image2d_msaa_t image, int2 coord, int sample);
+uint4 __purefn __ovld read_imageui(read_only image2d_msaa_t image, int2 coord, int sample);
+
+float __purefn __ovld read_imagef(read_only image2d_msaa_depth_t image, int2 coord, int sample);
+
+float4 __purefn __ovld read_imagef(read_only image2d_array_msaa_t image, int4 coord, int sample);
+int4 __purefn __ovld read_imagei(read_only image2d_array_msaa_t image, int4 coord, int sample);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_msaa_t image, int4 coord, int sample);
+
+float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, int4 coord, int sample);
+#endif //cl_khr_gl_msaa_sharing
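+
+// Illustrative, non-normative sketch of a sampler-based read, assuming the 'smp'
+// sampler shown earlier and a hypothetical 'read_only image2d_t img' kernel argument:
+//
+//   float4 texel = read_imagef(img, smp, (int2)(x, y));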
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#ifdef cl_khr_mipmap_image
+
+float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod);
+int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod);
+uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
+int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
+
+float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+
+float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
+int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+
+float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+
+float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+
+float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+
+float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+
+float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+
+float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
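+
+// Illustrative, non-normative sketch of an explicit level-of-detail read under
+// cl_khr_mipmap_image ('img' and 'smp' are hypothetical, as above):
+//
+//   float4 c = read_imagef(img, smp, (float2)(u, v), 2.0f);   // sample mip level 2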
+
+#endif //cl_khr_mipmap_image
+
+/**
+* Sampler-less Image Access
+*/
+
+float4 __purefn __ovld read_imagef(read_only image1d_t image, int coord);
+int4 __purefn __ovld read_imagei(read_only image1d_t image, int coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_t image, int coord);
+
+float4 __purefn __ovld read_imagef(read_only image1d_buffer_t image, int coord);
+int4 __purefn __ovld read_imagei(read_only image1d_buffer_t image, int coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_buffer_t image, int coord);
+
+float4 __purefn __ovld read_imagef(read_only image1d_array_t image, int2 coord);
+int4 __purefn __ovld read_imagei(read_only image1d_array_t image, int2 coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_array_t image, int2 coord);
+
+float4 __purefn __ovld read_imagef(read_only image2d_t image, int2 coord);
+int4 __purefn __ovld read_imagei(read_only image2d_t image, int2 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_t image, int2 coord);
+
+float4 __purefn __ovld read_imagef(read_only image2d_array_t image, int4 coord);
+int4 __purefn __ovld read_imagei(read_only image2d_array_t image, int4 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_t image, int4 coord);
+
+#ifdef cl_khr_depth_images
+float __purefn __ovld read_imagef(read_only image2d_depth_t image, int2 coord);
+float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, int4 coord);
+#endif //cl_khr_depth_images
+
+float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord);
+int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord);
+uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord);
+
+// Image read functions returning half4 type
+#ifdef cl_khr_fp16
+half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
+half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
+half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
+half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
+half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
+half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
+half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
+half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
+half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
+half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
+half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
+half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
+half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
+half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
+half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
+half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
+#endif //cl_khr_fp16
+
+// Image read functions for read_write images
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
+int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
+uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
+
+float4 __purefn __ovld read_imagef(read_write image1d_buffer_t image, int coord);
+int4 __purefn __ovld read_imagei(read_write image1d_buffer_t image, int coord);
+uint4 __purefn __ovld read_imageui(read_write image1d_buffer_t image, int coord);
+
+float4 __purefn __ovld read_imagef(read_write image1d_array_t image, int2 coord);
+int4 __purefn __ovld read_imagei(read_write image1d_array_t image, int2 coord);
+uint4 __purefn __ovld read_imageui(read_write image1d_array_t image, int2 coord);
+
+float4 __purefn __ovld read_imagef(read_write image2d_t image, int2 coord);
+int4 __purefn __ovld read_imagei(read_write image2d_t image, int2 coord);
+uint4 __purefn __ovld read_imageui(read_write image2d_t image, int2 coord);
+
+float4 __purefn __ovld read_imagef(read_write image2d_array_t image, int4 coord);
+int4 __purefn __ovld read_imagei(read_write image2d_array_t image, int4 coord);
+uint4 __purefn __ovld read_imageui(read_write image2d_array_t image, int4 coord);
+
+float4 __purefn __ovld read_imagef(read_write image3d_t image, int4 coord);
+int4 __purefn __ovld read_imagei(read_write image3d_t image, int4 coord);
+uint4 __purefn __ovld read_imageui(read_write image3d_t image, int4 coord);
+
+#ifdef cl_khr_depth_images
+float __purefn __ovld read_imagef(read_write image2d_depth_t image, int2 coord);
+float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, int4 coord);
+#endif //cl_khr_depth_images
+
+#if defined(cl_khr_gl_msaa_sharing)
+float4 __purefn __ovld read_imagef(read_write image2d_msaa_t image, int2 coord, int sample);
+int4 __purefn __ovld read_imagei(read_write image2d_msaa_t image, int2 coord, int sample);
+uint4 __purefn __ovld read_imageui(read_write image2d_msaa_t image, int2 coord, int sample);
+
+float4 __purefn __ovld read_imagef(read_write image2d_array_msaa_t image, int4 coord, int sample);
+int4 __purefn __ovld read_imagei(read_write image2d_array_msaa_t image, int4 coord, int sample);
+uint4 __purefn __ovld read_imageui(read_write image2d_array_msaa_t image, int4 coord, int sample);
+
+float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 coord, int sample);
+float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
+#endif //cl_khr_gl_msaa_sharing
+
+#ifdef cl_khr_mipmap_image
+float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+
+float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+
+float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+
+float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+
+float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+
+float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+
+float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+
+float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+
+float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+
+float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+
+float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+
+float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+#endif //cl_khr_mipmap_image
+
+// Image read functions returning half4 type
+#ifdef cl_khr_fp16
+half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord);
+half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
+half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
+half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
+half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
+half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y) in the 2D image object specified by image.
+ * (coord.x, coord.y) are considered to be unnormalized coordinates
+ * and must be in the range 0 ... image width - 1, and 0
+ * ... image height - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y) in the 2D image object specified by index
+ * (coord.z) of the 2D image array object image_array.
+ * (coord.x, coord.y) are considered to be unnormalized
+ * coordinates and must be in the range 0 ... image width - 1,
+ * and 0 ... image height - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord) in the 1D image (buffer) object specified by image.
+ * coord is considered to be an unnormalized coordinate
+ * and must be in the range 0 ... image width - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x) in the 1D image object specified by index
+ * (coord.y) of the 1D image array object image_array.
+ * coord.x is considered to be an unnormalized coordinate
+ * and must be in the range 0 ... image width - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y, coord.z) in the 3D image object specified by image.
+ * coord.x, coord.y and coord.z are considered to be unnormalized coordinates
+ * and must be in the range 0 ... image width - 1, 0 ... image height - 1,
+ * and 0 ... image depth - 1.
+ *
+ * For mipmap images, use mip-level specified by lod.
+ *
+ * Appropriate data format conversion to the specified
+ * image format is done before writing the color value.
+ *
+ * write_imagef can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the pre-defined packed formats or set to
+ * CL_SNORM_INT8, CL_UNORM_INT8,
+ * CL_SNORM_INT16, CL_UNORM_INT16,
+ * CL_HALF_FLOAT or CL_FLOAT. Appropriate data
+ * format conversion will be done to convert channel
+ * data from a floating-point value to the actual data format
+ * in which the channels are stored.
+ *
+ * write_imagei can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_SIGNED_INT8,
+ * CL_SIGNED_INT16 and
+ * CL_SIGNED_INT32.
+ *
+ * write_imageui can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_UNSIGNED_INT8,
+ * CL_UNSIGNED_INT16 and
+ * CL_UNSIGNED_INT32.
+ *
+ * The behavior of write_imagef, write_imagei and
+ * write_imageui for image objects created with
+ * image_channel_data_type values not specified in
+ * the description above or with (x, y) coordinate
+ * values that are not in the range (0 ... image width - 1,
+ * 0 ... image height - 1), respectively, is undefined.
+ */
+void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color);
+void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color);
+void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color);
+
+void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
+void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
+void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
+
+void __ovld write_imagef(write_only image1d_t image, int coord, float4 color);
+void __ovld write_imagei(write_only image1d_t image, int coord, int4 color);
+void __ovld write_imageui(write_only image1d_t image, int coord, uint4 color);
+
+void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color);
+void __ovld write_imagei(write_only image1d_buffer_t image, int coord, int4 color);
+void __ovld write_imageui(write_only image1d_buffer_t image, int coord, uint4 color);
+
+void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
+void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
+void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
+
+void __ovld write_imagef(write_only image3d_t image, int4 coord, float4 color);
+void __ovld write_imagei(write_only image3d_t image, int4 coord, int4 color);
+void __ovld write_imageui(write_only image3d_t image, int4 coord, uint4 color);
+
+#ifdef cl_khr_depth_images
+void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color);
+void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color);
+#endif //cl_khr_depth_images
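+
+// Editor's note: illustrative sketch only, not part of the upstream header.
+// A hypothetical kernel showing the basic write_imagef overload declared above;
+// the kernel, image and sampler names are placeholders.
+//
+//   __kernel void copy_image(read_only image2d_t src,
+//                            write_only image2d_t dst,
+//                            sampler_t smp) {
+//     int2 coord = (int2)(get_global_id(0), get_global_id(1));
+//     float4 px = read_imagef(src, smp, coord);  // sampler-based read
+//     write_imagef(dst, coord, px);              // unnormalized int2 coordinate
+//   }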
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#ifdef cl_khr_mipmap_image
+void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
+void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
+void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color);
+
+void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color);
+void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color);
+void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color);
+
+void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color);
+void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color);
+void __ovld write_imageui(write_only image2d_t image, int2 coord, int lod, uint4 color);
+
+void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color);
+void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
+void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
+
+void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float color);
+void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float color);
+
+void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
+void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
+void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
+#endif //cl_khr_mipmap_image
+
+// Image write functions for half4 type
+#ifdef cl_khr_fp16
+void __ovld write_imageh(write_only image1d_t image, int coord, half4 color);
+void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
+void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
+void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
+void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
+void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color);
+#endif //cl_khr_fp16
+
+// Image write functions for read_write images
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
+void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
+void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
+
+void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
+void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
+void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
+
+void __ovld write_imagef(read_write image1d_t image, int coord, float4 color);
+void __ovld write_imagei(read_write image1d_t image, int coord, int4 color);
+void __ovld write_imageui(read_write image1d_t image, int coord, uint4 color);
+
+void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color);
+void __ovld write_imagei(read_write image1d_buffer_t image, int coord, int4 color);
+void __ovld write_imageui(read_write image1d_buffer_t image, int coord, uint4 color);
+
+void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
+void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
+void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
+
+void __ovld write_imagef(read_write image3d_t image, int4 coord, float4 color);
+void __ovld write_imagei(read_write image3d_t image, int4 coord, int4 color);
+void __ovld write_imageui(read_write image3d_t image, int4 coord, uint4 color);
+
+#ifdef cl_khr_depth_images
+void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color);
+void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
+#endif //cl_khr_depth_images
+
+#ifdef cl_khr_mipmap_image
+void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
+void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
+void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color);
+
+void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color);
+
+void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image2d_t image, int2 coord, int lod, uint4 color);
+
+void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
+
+void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
+void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
+
+void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
+#endif //cl_khr_mipmap_image
+
+// Image write functions for half4 type
+#ifdef cl_khr_fp16
+void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
+void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
+void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
+void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
+void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
+void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// Note: In OpenCL v1.0/1.1/1.2, the image argument of the image query builtin functions
+// does not have an access qualifier, which therefore defaults to read_only. Image query
+// builtin functions with a write_only image argument should also be declared.
+
+/**
+ * Return the image width in pixels.
+ */
+int __ovld __cnfn get_image_width(read_only image1d_t image);
+int __ovld __cnfn get_image_width(read_only image1d_buffer_t image);
+int __ovld __cnfn get_image_width(read_only image2d_t image);
+int __ovld __cnfn get_image_width(read_only image3d_t image);
+int __ovld __cnfn get_image_width(read_only image1d_array_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(read_only image2d_depth_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(read_only image2d_msaa_t image);
+int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_width(write_only image1d_t image);
+int __ovld __cnfn get_image_width(write_only image1d_buffer_t image);
+int __ovld __cnfn get_image_width(write_only image2d_t image);
+int __ovld __cnfn get_image_width(write_only image3d_t image);
+int __ovld __cnfn get_image_width(write_only image1d_array_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(write_only image2d_depth_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(write_only image2d_msaa_t image);
+int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld __cnfn get_image_width(read_write image1d_t image);
+int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
+int __ovld __cnfn get_image_width(read_write image2d_t image);
+int __ovld __cnfn get_image_width(read_write image3d_t image);
+int __ovld __cnfn get_image_width(read_write image1d_array_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(read_write image2d_depth_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(read_write image2d_msaa_t image);
+int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Return the image height in pixels.
+ */
+int __ovld __cnfn get_image_height(read_only image2d_t image);
+int __ovld __cnfn get_image_height(read_only image3d_t image);
+int __ovld __cnfn get_image_height(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(read_only image2d_depth_t image);
+int __ovld __cnfn get_image_height(read_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(read_only image2d_msaa_t image);
+int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_height(write_only image2d_t image);
+int __ovld __cnfn get_image_height(write_only image3d_t image);
+int __ovld __cnfn get_image_height(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(write_only image2d_depth_t image);
+int __ovld __cnfn get_image_height(write_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(write_only image2d_msaa_t image);
+int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld __cnfn get_image_height(read_write image2d_t image);
+int __ovld __cnfn get_image_height(read_write image3d_t image);
+int __ovld __cnfn get_image_height(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(read_write image2d_depth_t image);
+int __ovld __cnfn get_image_height(read_write image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(read_write image2d_msaa_t image);
+int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Return the image depth in pixels.
+ */
+int __ovld __cnfn get_image_depth(read_only image3d_t image);
+
+int __ovld __cnfn get_image_depth(write_only image3d_t image);
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld __cnfn get_image_depth(read_write image3d_t image);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#ifdef cl_khr_mipmap_image
+/**
+ * Return the number of mip levels in the image.
+ */
+
+int __ovld get_image_num_mip_levels(read_only image1d_t image);
+int __ovld get_image_num_mip_levels(read_only image2d_t image);
+int __ovld get_image_num_mip_levels(read_only image3d_t image);
+
+int __ovld get_image_num_mip_levels(write_only image1d_t image);
+int __ovld get_image_num_mip_levels(write_only image2d_t image);
+int __ovld get_image_num_mip_levels(write_only image3d_t image);
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld get_image_num_mip_levels(read_write image1d_t image);
+int __ovld get_image_num_mip_levels(read_write image2d_t image);
+int __ovld get_image_num_mip_levels(read_write image3d_t image);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
+int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
+int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
+int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
+
+int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
+int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
+int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
+int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
+int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
+int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
+int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#endif //cl_khr_mipmap_image
+
+/**
+ * Return the channel data type. Valid values are:
+ * CLK_SNORM_INT8
+ * CLK_SNORM_INT16
+ * CLK_UNORM_INT8
+ * CLK_UNORM_INT16
+ * CLK_UNORM_SHORT_565
+ * CLK_UNORM_SHORT_555
+ * CLK_UNORM_INT_101010
+ * CLK_SIGNED_INT8
+ * CLK_SIGNED_INT16
+ * CLK_SIGNED_INT32
+ * CLK_UNSIGNED_INT8
+ * CLK_UNSIGNED_INT16
+ * CLK_UNSIGNED_INT32
+ * CLK_HALF_FLOAT
+ * CLK_FLOAT
+ */
+
+//
+// Channel Datatype.
+//
+#define CLK_SNORM_INT8        0x10D0
+#define CLK_SNORM_INT16       0x10D1
+#define CLK_UNORM_INT8        0x10D2
+#define CLK_UNORM_INT16       0x10D3
+#define CLK_UNORM_SHORT_565   0x10D4
+#define CLK_UNORM_SHORT_555   0x10D5
+#define CLK_UNORM_INT_101010  0x10D6
+#define CLK_SIGNED_INT8       0x10D7
+#define CLK_SIGNED_INT16      0x10D8
+#define CLK_SIGNED_INT32      0x10D9
+#define CLK_UNSIGNED_INT8     0x10DA
+#define CLK_UNSIGNED_INT16    0x10DB
+#define CLK_UNSIGNED_INT32    0x10DC
+#define CLK_HALF_FLOAT        0x10DD
+#define CLK_FLOAT             0x10DE
+#define CLK_UNORM_INT24       0x10DF
+
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image3d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image3d_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image3d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
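+
+// Editor's note: illustrative sketch only, not part of the upstream header.
+// Hypothetical use of the channel data type query together with the CLK_*
+// constants defined above; 'img' is a placeholder image argument.
+//
+//   int type = get_image_channel_data_type(img);
+//   if (type == CLK_FLOAT || type == CLK_HALF_FLOAT) {
+//     // access the image with the *_imagef overloads
+//   } else if (type == CLK_UNSIGNED_INT8 || type == CLK_UNSIGNED_INT32) {
+//     // access the image with the *_imageui overloads
+//   }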
+
+/**
+ * Return the image channel order. Valid values are:
+ * CLK_A
+ * CLK_R
+ * CLK_Rx
+ * CLK_RG
+ * CLK_RGx
+ * CLK_RA
+ * CLK_RGB
+ * CLK_RGBx
+ * CLK_RGBA
+ * CLK_ARGB
+ * CLK_BGRA
+ * CLK_INTENSITY
+ * CLK_LUMINANCE
+ */
+// Channel order, numbering must be aligned with cl_channel_order in cl.h
+//
+#define CLK_R         0x10B0
+#define CLK_A         0x10B1
+#define CLK_RG        0x10B2
+#define CLK_RA        0x10B3
+#define CLK_RGB       0x10B4
+#define CLK_RGBA      0x10B5
+#define CLK_BGRA      0x10B6
+#define CLK_ARGB      0x10B7
+#define CLK_INTENSITY 0x10B8
+#define CLK_LUMINANCE 0x10B9
+#define CLK_Rx                0x10BA
+#define CLK_RGx               0x10BB
+#define CLK_RGBx              0x10BC
+#define CLK_DEPTH             0x10BD
+#define CLK_DEPTH_STENCIL     0x10BE
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#define CLK_sRGB              0x10BF
+#define CLK_sRGBA             0x10C1
+#define CLK_sRGBx             0x10C0
+#define CLK_sBGRA             0x10C2
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+int __ovld __cnfn get_image_channel_order(read_only image1d_t image);
+int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_t image);
+int __ovld __cnfn get_image_channel_order(read_only image3d_t image);
+int __ovld __cnfn get_image_channel_order(read_only image1d_array_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_channel_order(write_only image1d_t image);
+int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_t image);
+int __ovld __cnfn get_image_channel_order(write_only image3d_t image);
+int __ovld __cnfn get_image_channel_order(write_only image1d_array_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
+int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
+int __ovld __cnfn get_image_channel_order(read_write image3d_t image);
+int __ovld __cnfn get_image_channel_order(read_write image1d_array_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Return the 2D image width and height as an int2
+ * type. The width is returned in the x component, and
+ * the height in the y component.
+ */
+int2 __ovld __cnfn get_image_dim(read_only image2d_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int2 __ovld __cnfn get_image_dim(write_only image2d_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Return the 3D image width, height, and depth as an
+ * int4 type. The width is returned in the x
+ * component, height in the y component, depth in the z
+ * component and the w component is 0.
+ */
+int4 __ovld __cnfn get_image_dim(read_only image3d_t image);
+int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
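+
+// Editor's note: illustrative sketch only, not part of the upstream header.
+// Hypothetical bounds check using the dimension queries declared above;
+// 'img' is a placeholder write-enabled 2D image.
+//
+//   int2 coord = (int2)(get_global_id(0), get_global_id(1));
+//   int2 dim = get_image_dim(img);
+//   if (coord.x < dim.x && coord.y < dim.y)
+//     write_imagef(img, coord, (float4)(0.0f));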
+
+/**
+ * Return the image array size.
+ */
+
+size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t image_array);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t image_array);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t image_array);
+#endif //cl_khr_gl_msaa_sharing
+
+size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t image_array);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t image_array);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_array);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image_array);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Return the number of samples associated with image
+ */
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld get_image_num_samples(read_only image2d_msaa_t image);
+int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
+int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
+int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
+
+int __ovld get_image_num_samples(write_only image2d_msaa_t image);
+int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
+int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
+int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld get_image_num_samples(read_write image2d_msaa_t image);
+int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
+int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
+int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //cl_khr_gl_msaa_sharing
+
+// OpenCL v2.0 s6.13.15 - Work-group Functions
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld work_group_all(int predicate);
+int __ovld work_group_any(int predicate);
+
+#ifdef cl_khr_fp16
+half __ovld work_group_broadcast(half a, size_t local_id);
+half __ovld work_group_broadcast(half a, size_t x, size_t y);
+half __ovld work_group_broadcast(half a, size_t x, size_t y, size_t z);
+#endif
+int __ovld work_group_broadcast(int a, size_t local_id);
+int __ovld work_group_broadcast(int a, size_t x, size_t y);
+int __ovld work_group_broadcast(int a, size_t x, size_t y, size_t z);
+uint __ovld work_group_broadcast(uint a, size_t local_id);
+uint __ovld work_group_broadcast(uint a, size_t x, size_t y);
+uint __ovld work_group_broadcast(uint a, size_t x, size_t y, size_t z);
+long __ovld work_group_broadcast(long a, size_t local_id);
+long __ovld work_group_broadcast(long a, size_t x, size_t y);
+long __ovld work_group_broadcast(long a, size_t x, size_t y, size_t z);
+ulong __ovld work_group_broadcast(ulong a, size_t local_id);
+ulong __ovld work_group_broadcast(ulong a, size_t x, size_t y);
+ulong __ovld work_group_broadcast(ulong a, size_t x, size_t y, size_t z);
+float __ovld work_group_broadcast(float a, size_t local_id);
+float __ovld work_group_broadcast(float a, size_t x, size_t y);
+float __ovld work_group_broadcast(float a, size_t x, size_t y, size_t z);
+#ifdef cl_khr_fp64
+double __ovld work_group_broadcast(double a, size_t local_id);
+double __ovld work_group_broadcast(double a, size_t x, size_t y);
+double __ovld work_group_broadcast(double a, size_t x, size_t y, size_t z);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld work_group_reduce_add(half x);
+half __ovld work_group_reduce_min(half x);
+half __ovld work_group_reduce_max(half x);
+half __ovld work_group_scan_exclusive_add(half x);
+half __ovld work_group_scan_exclusive_min(half x);
+half __ovld work_group_scan_exclusive_max(half x);
+half __ovld work_group_scan_inclusive_add(half x);
+half __ovld work_group_scan_inclusive_min(half x);
+half __ovld work_group_scan_inclusive_max(half x);
+#endif
+int __ovld work_group_reduce_add(int x);
+int __ovld work_group_reduce_min(int x);
+int __ovld work_group_reduce_max(int x);
+int __ovld work_group_scan_exclusive_add(int x);
+int __ovld work_group_scan_exclusive_min(int x);
+int __ovld work_group_scan_exclusive_max(int x);
+int __ovld work_group_scan_inclusive_add(int x);
+int __ovld work_group_scan_inclusive_min(int x);
+int __ovld work_group_scan_inclusive_max(int x);
+uint __ovld work_group_reduce_add(uint x);
+uint __ovld work_group_reduce_min(uint x);
+uint __ovld work_group_reduce_max(uint x);
+uint __ovld work_group_scan_exclusive_add(uint x);
+uint __ovld work_group_scan_exclusive_min(uint x);
+uint __ovld work_group_scan_exclusive_max(uint x);
+uint __ovld work_group_scan_inclusive_add(uint x);
+uint __ovld work_group_scan_inclusive_min(uint x);
+uint __ovld work_group_scan_inclusive_max(uint x);
+long __ovld work_group_reduce_add(long x);
+long __ovld work_group_reduce_min(long x);
+long __ovld work_group_reduce_max(long x);
+long __ovld work_group_scan_exclusive_add(long x);
+long __ovld work_group_scan_exclusive_min(long x);
+long __ovld work_group_scan_exclusive_max(long x);
+long __ovld work_group_scan_inclusive_add(long x);
+long __ovld work_group_scan_inclusive_min(long x);
+long __ovld work_group_scan_inclusive_max(long x);
+ulong __ovld work_group_reduce_add(ulong x);
+ulong __ovld work_group_reduce_min(ulong x);
+ulong __ovld work_group_reduce_max(ulong x);
+ulong __ovld work_group_scan_exclusive_add(ulong x);
+ulong __ovld work_group_scan_exclusive_min(ulong x);
+ulong __ovld work_group_scan_exclusive_max(ulong x);
+ulong __ovld work_group_scan_inclusive_add(ulong x);
+ulong __ovld work_group_scan_inclusive_min(ulong x);
+ulong __ovld work_group_scan_inclusive_max(ulong x);
+float __ovld work_group_reduce_add(float x);
+float __ovld work_group_reduce_min(float x);
+float __ovld work_group_reduce_max(float x);
+float __ovld work_group_scan_exclusive_add(float x);
+float __ovld work_group_scan_exclusive_min(float x);
+float __ovld work_group_scan_exclusive_max(float x);
+float __ovld work_group_scan_inclusive_add(float x);
+float __ovld work_group_scan_inclusive_min(float x);
+float __ovld work_group_scan_inclusive_max(float x);
+#ifdef cl_khr_fp64
+double __ovld work_group_reduce_add(double x);
+double __ovld work_group_reduce_min(double x);
+double __ovld work_group_reduce_max(double x);
+double __ovld work_group_scan_exclusive_add(double x);
+double __ovld work_group_scan_exclusive_min(double x);
+double __ovld work_group_scan_exclusive_max(double x);
+double __ovld work_group_scan_inclusive_add(double x);
+double __ovld work_group_scan_inclusive_min(double x);
+double __ovld work_group_scan_inclusive_max(double x);
+#endif //cl_khr_fp64
+
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
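+
+// Editor's note: illustrative sketch only, not part of the upstream header.
+// Hypothetical use of the work-group collectives declared above (OpenCL 2.0);
+// every work-item in the work-group must execute these calls, and 'in'/'out'
+// are placeholder global buffers.
+//
+//   float v = in[get_global_id(0)];
+//   float sum = work_group_reduce_add(v);           // same result in every work-item
+//   float prefix = work_group_scan_exclusive_add(v);
+//   if (get_local_id(0) == 0)
+//     out[get_group_id(0)] = sum;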
+
+// OpenCL v2.0 s6.13.16 - Pipe Functions
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#define PIPE_RESERVE_ID_VALID_BIT (1U << 30)
+#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
+bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+
+// OpenCL v2.0 s6.13.17 - Enqueue Kernels
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#define CL_COMPLETE                                 0x0
+#define CL_RUNNING                                  0x1
+#define CL_SUBMITTED                                0x2
+#define CL_QUEUED                                   0x3
+
+#define CLK_SUCCESS                                 0
+#define CLK_ENQUEUE_FAILURE                         -101
+#define CLK_INVALID_QUEUE                           -102
+#define CLK_INVALID_NDRANGE                         -160
+#define CLK_INVALID_EVENT_WAIT_LIST                 -57
+#define CLK_DEVICE_QUEUE_FULL                       -161
+#define CLK_INVALID_ARG_SIZE                        -51
+#define CLK_EVENT_ALLOCATION_FAILURE                -100
+#define CLK_OUT_OF_RESOURCES                        -5
+
+#define CLK_NULL_QUEUE                              0
+#define CLK_NULL_EVENT (__builtin_astype(((void*)(__SIZE_MAX__)), clk_event_t))
+
+// execution model related definitions
+#define CLK_ENQUEUE_FLAGS_NO_WAIT                   0x0
+#define CLK_ENQUEUE_FLAGS_WAIT_KERNEL               0x1
+#define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP           0x2
+
+typedef int kernel_enqueue_flags_t;
+typedef int clk_profiling_info;
+
+// Profiling info name (see capture_event_profiling_info)
+#define CLK_PROFILING_COMMAND_EXEC_TIME 0x1
+
+#define MAX_WORK_DIM        3
+
+// TODO: Remove the definition of ndrange_t in Clang as an opaque type and add back
+// the following ndrange_t definition.
+#if 0
+typedef struct {
+    unsigned int workDimension;
+    size_t globalWorkOffset[MAX_WORK_DIM];
+    size_t globalWorkSize[MAX_WORK_DIM];
+    size_t localWorkSize[MAX_WORK_DIM];
+} ndrange_t;
+#endif
+
+ndrange_t __ovld ndrange_1D(size_t);
+ndrange_t __ovld ndrange_1D(size_t, size_t);
+ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
+
+ndrange_t __ovld ndrange_2D(const size_t[2]);
+ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]);
+ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]);
+
+ndrange_t __ovld ndrange_3D(const size_t[3]);
+ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]);
+ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]);
+
+int __ovld enqueue_marker(queue_t, uint, const __private clk_event_t*, __private clk_event_t*);
+
+void __ovld retain_event(clk_event_t);
+
+void __ovld release_event(clk_event_t);
+
+clk_event_t create_user_event(void);
+
+void __ovld set_user_event_status(clk_event_t e, int state);
+
+bool is_valid_event(clk_event_t event);
+
+void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
+
+queue_t __ovld get_default_queue(void);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
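+
+// Editor's note: illustrative sketch only, not part of the upstream header.
+// Hypothetical device-side enqueue using ndrange_1D and get_default_queue from
+// above; enqueue_kernel itself is assumed to be provided as a Clang builtin,
+// and 'global_size'/'child_work' are placeholders.
+//
+//   queue_t q = get_default_queue();
+//   ndrange_t nd = ndrange_1D(global_size);
+//   int err = enqueue_kernel(q, CLK_ENQUEUE_FLAGS_NO_WAIT, nd,
+//                            ^{ child_work(); });   // block literal child kernel
+//   if (err != CLK_SUCCESS) { /* e.g. CLK_DEVICE_QUEUE_FULL */ }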
+
+// OpenCL Extension v2.0 s9.17 - Sub-groups
+
+#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
+// Shared Sub Group Functions
+uint    __ovld get_sub_group_size(void);
+uint    __ovld get_max_sub_group_size(void);
+uint    __ovld get_num_sub_groups(void);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+uint    __ovld get_enqueued_num_sub_groups(void);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+uint    __ovld get_sub_group_id(void);
+uint    __ovld get_sub_group_local_id(void);
+
+void    __ovld sub_group_barrier(cl_mem_fence_flags flags);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void    __ovld sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+int     __ovld sub_group_all(int predicate);
+int     __ovld sub_group_any(int predicate);
+
+int     __ovld sub_group_broadcast(int   x, uint sub_group_local_id);
+uint    __ovld sub_group_broadcast(uint  x, uint sub_group_local_id);
+long    __ovld sub_group_broadcast(long  x, uint sub_group_local_id);
+ulong   __ovld sub_group_broadcast(ulong x, uint sub_group_local_id);
+float   __ovld sub_group_broadcast(float x, uint sub_group_local_id);
+
+int     __ovld sub_group_reduce_add(int   x);
+uint    __ovld sub_group_reduce_add(uint  x);
+long    __ovld sub_group_reduce_add(long  x);
+ulong   __ovld sub_group_reduce_add(ulong x);
+float   __ovld sub_group_reduce_add(float x);
+int     __ovld sub_group_reduce_min(int   x);
+uint    __ovld sub_group_reduce_min(uint  x);
+long    __ovld sub_group_reduce_min(long  x);
+ulong   __ovld sub_group_reduce_min(ulong x);
+float   __ovld sub_group_reduce_min(float x);
+int     __ovld sub_group_reduce_max(int   x);
+uint    __ovld sub_group_reduce_max(uint  x);
+long    __ovld sub_group_reduce_max(long  x);
+ulong   __ovld sub_group_reduce_max(ulong x);
+float   __ovld sub_group_reduce_max(float x);
+
+int     __ovld sub_group_scan_exclusive_add(int   x);
+uint    __ovld sub_group_scan_exclusive_add(uint  x);
+long    __ovld sub_group_scan_exclusive_add(long  x);
+ulong   __ovld sub_group_scan_exclusive_add(ulong x);
+float   __ovld sub_group_scan_exclusive_add(float x);
+int     __ovld sub_group_scan_exclusive_min(int   x);
+uint    __ovld sub_group_scan_exclusive_min(uint  x);
+long    __ovld sub_group_scan_exclusive_min(long  x);
+ulong   __ovld sub_group_scan_exclusive_min(ulong x);
+float   __ovld sub_group_scan_exclusive_min(float x);
+int     __ovld sub_group_scan_exclusive_max(int   x);
+uint    __ovld sub_group_scan_exclusive_max(uint  x);
+long    __ovld sub_group_scan_exclusive_max(long  x);
+ulong   __ovld sub_group_scan_exclusive_max(ulong x);
+float   __ovld sub_group_scan_exclusive_max(float x);
+
+int     __ovld sub_group_scan_inclusive_add(int   x);
+uint    __ovld sub_group_scan_inclusive_add(uint  x);
+long    __ovld sub_group_scan_inclusive_add(long  x);
+ulong   __ovld sub_group_scan_inclusive_add(ulong x);
+float   __ovld sub_group_scan_inclusive_add(float x);
+int     __ovld sub_group_scan_inclusive_min(int   x);
+uint    __ovld sub_group_scan_inclusive_min(uint  x);
+long    __ovld sub_group_scan_inclusive_min(long  x);
+ulong   __ovld sub_group_scan_inclusive_min(ulong x);
+float   __ovld sub_group_scan_inclusive_min(float x);
+int     __ovld sub_group_scan_inclusive_max(int   x);
+uint    __ovld sub_group_scan_inclusive_max(uint  x);
+long    __ovld sub_group_scan_inclusive_max(long  x);
+ulong   __ovld sub_group_scan_inclusive_max(ulong x);
+float   __ovld sub_group_scan_inclusive_max(float x);
+
+#ifdef cl_khr_fp16
+half    __ovld sub_group_broadcast(half x, uint sub_group_local_id);
+half    __ovld sub_group_reduce_add(half x);
+half    __ovld sub_group_reduce_min(half x);
+half    __ovld sub_group_reduce_max(half x);
+half    __ovld sub_group_scan_exclusive_add(half x);
+half    __ovld sub_group_scan_exclusive_min(half x);
+half    __ovld sub_group_scan_exclusive_max(half x);
+half    __ovld sub_group_scan_inclusive_add(half x);
+half    __ovld sub_group_scan_inclusive_min(half x);
+half    __ovld sub_group_scan_inclusive_max(half x);
+#endif //cl_khr_fp16
+
+#ifdef cl_khr_fp64
+double  __ovld sub_group_broadcast(double x, uint sub_group_local_id);
+double  __ovld sub_group_reduce_add(double x);
+double  __ovld sub_group_reduce_min(double x);
+double  __ovld sub_group_reduce_max(double x);
+double  __ovld sub_group_scan_exclusive_add(double x);
+double  __ovld sub_group_scan_exclusive_min(double x);
+double  __ovld sub_group_scan_exclusive_max(double x);
+double  __ovld sub_group_scan_inclusive_add(double x);
+double  __ovld sub_group_scan_inclusive_min(double x);
+double  __ovld sub_group_scan_inclusive_max(double x);
+#endif //cl_khr_fp64
+
+#endif //cl_khr_subgroups cl_intel_subgroups
+
+// Disable any extensions we may have enabled previously.
+#pragma OPENCL EXTENSION all : disable
+
+#undef __cnfn
+#undef __ovld
+#endif //_OPENCL_H_
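As context for the sub-group built-ins declared above, here is a minimal, hypothetical OpenCL C sketch (the kernel name, buffer arguments, and extension pragma are illustrative and not part of this patch); it assumes a device that exposes cl_khr_subgroups:

// Illustrative sketch only -- not part of the prebuilt header.
#pragma OPENCL EXTENSION cl_khr_subgroups : enable

__kernel void subgroup_sums(__global const float *in, __global float *out)
{
    size_t gid = get_global_id(0);
    // Every work-item in a sub-group receives the same sub-group-wide sum.
    float sum = sub_group_reduce_add(in[gid]);
    out[gid] = sum;
}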
diff --git a/renderscript/clang-include/pkuintrin.h b/renderscript/clang-include/pkuintrin.h
new file mode 100644
index 0000000..9e54594
--- /dev/null
+++ b/renderscript/clang-include/pkuintrin.h
@@ -0,0 +1,48 @@
+/*===------------- pkuintrin.h - PKU intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <pkuintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __PKUINTRIN_H
+#define __PKUINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("pku")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rdpkru_u32(void)
+{
+  return __builtin_ia32_rdpkru();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_wrpkru(unsigned int __val)
+{
+  return __builtin_ia32_wrpkru(__val);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
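For context, a hypothetical host-side sketch using the two intrinsics declared above (the program structure, sample values, and -mpku flag are assumptions, not part of this patch); it further assumes a PKU-capable CPU and an OS that has enabled protection keys:

/* Illustrative sketch only -- not part of the prebuilt header. */
#include <immintrin.h>
#include <stdio.h>

int main(void)
{
    unsigned int pkru = _rdpkru_u32();   /* read the current PKRU value       */
    pkru |= 0x2u << (2 * 1);             /* set the write-disable bit of key 1 */
    _wrpkru(pkru);                       /* install the new access rights     */
    printf("PKRU = 0x%08x\n", _rdpkru_u32());
    return 0;
}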
diff --git a/renderscript/clang-include/pmmintrin.h b/renderscript/clang-include/pmmintrin.h
index 0ff9409..5b10580 100644
--- a/renderscript/clang-include/pmmintrin.h
+++ b/renderscript/clang-include/pmmintrin.h
@@ -27,68 +27,235 @@
 #include <emmintrin.h>
 
 /* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse3")))
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("sse3")))
 
+/// \brief Loads data from an unaligned memory location to elements in a 128-bit
+///    vector. If the address of the data is not 16-byte aligned, the
+///    instruction may read two adjacent aligned blocks of memory to retrieve
+///    the requested data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VLDDQU instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit integer vector containing integer values.
+/// \returns A 128-bit vector containing the moved values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_lddqu_si128(__m128i const *__p)
 {
   return (__m128i)__builtin_ia32_lddqu((char const *)__p);
 }
 
+/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+///    two 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDSUBPS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the left source operand.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing the right source operand.
+/// \returns A 128-bit vector of [4 x float] containing the alternating sums and
+///    differences of both operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_addsub_ps(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_addsubps(__a, __b);
+  return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Horizontally adds the adjacent pairs of values contained in two
+///    128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHADDPS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The horizontal sums of the values are stored in the lower bits of the
+///    destination.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The horizontal sums of the values are stored in the upper bits of the
+///    destination.
+/// \returns A 128-bit vector of [4 x float] containing the horizontal sums of
+///    both operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_hadd_ps(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_haddps(__a, __b);
+  return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Horizontally subtracts the adjacent pairs of values contained in two
+///    128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHSUBPS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The horizontal differences between the values are stored in the lower
+///    bits of the destination.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The horizontal differences between the values are stored in the upper
+///    bits of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the horizontal
+///    differences of both operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_hsub_ps(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_hsubps(__a, __b);
+  return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Moves and duplicates high-order (odd-indexed) values from a 128-bit
+///    vector of [4 x float] to float values stored in a 128-bit vector of
+///    [4 x float].
+///    Bits [127:96] of the source are written to bits [127:96] and [95:64] of
+///    the destination.
+///    Bits [63:32] of the source are written to bits [63:32] and [31:0] of the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSHDUP instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated
+///    values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_movehdup_ps(__m128 __a)
 {
-  return __builtin_shufflevector(__a, __a, 1, 1, 3, 3);
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3);
 }
 
+/// \brief Duplicates low-order (even-indexed) values from a 128-bit
+///    vector of [4 x float] to float values stored in a 128-bit vector of
+///    [4 x float].
+///    Bits [95:64] of the source are written to bits [127:96] and [95:64] of
+///    the destination.
+///    Bits [31:0] of the source are written to bits [63:32] and [31:0] of the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSLDUP instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated
+///    values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_moveldup_ps(__m128 __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2);
 }
 
+/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+///    two 128-bit vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDSUBPD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double] containing the left source operand.
+/// \param __b
+///    A 128-bit vector of [2 x double] containing the right source operand.
+/// \returns A 128-bit vector of [2 x double] containing the alternating sums
+///    and differences of both operands.
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_addsub_pd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_addsubpd(__a, __b);
+  return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b);
 }
 
+/// \brief Horizontally adds the pairs of values contained in two 128-bit
+///    vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHADDPD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double] containing one of the source operands.
+///    The horizontal sum of the values is stored in the lower bits of the
+///    destination.
+/// \param __b
+///    A 128-bit vector of [2 x double] containing one of the source operands.
+///    The horizontal sum of the values is stored in the upper bits of the
+///    destination.
+/// \returns A 128-bit vector of [2 x double] containing the horizontal sums of
+///    both operands.
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_hadd_pd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_haddpd(__a, __b);
+  return __builtin_ia32_haddpd((__v2df)__a, (__v2df)__b);
 }
 
+/// \brief Horizontally subtracts the pairs of values contained in two 128-bit
+///    vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHSUBPD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double] containing one of the source operands.
+///    The horizontal difference of the values is stored in the lower bits of
+///    the destination.
+/// \param __b
+///    A 128-bit vector of [2 x double] containing one of the source operands.
+///    The horizontal difference of the values is stored in the upper bits of
+///    the destination.
+/// \returns A 128-bit vector of [2 x double] containing the horizontal
+///    differences of both operands.
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_hsub_pd(__m128d __a, __m128d __b)
 {
-  return __builtin_ia32_hsubpd(__a, __b);
+  return __builtin_ia32_hsubpd((__v2df)__a, (__v2df)__b);
 }
 
+/// \brief Moves and duplicates one double-precision value to double-precision
+///    values stored in a 128-bit vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_loaddup_pd(double const * dp);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VMOVDDUP instruction.
+///
+/// \param dp
+///    A pointer to a double-precision value to be moved and duplicated.
+/// \returns A 128-bit vector of [2 x double] containing the moved and
+///    duplicated values.
 #define        _mm_loaddup_pd(dp)        _mm_load1_pd(dp)
 
+/// \brief Moves and duplicates the double-precision value in the lower bits of
+///    a 128-bit vector of [2 x double] to double-precision values stored in a
+///    128-bit vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVDDUP instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double]. Bits [63:0] are written to bits
+///    [127:64] and [63:0] of the destination.
+/// \returns A 128-bit vector of [2 x double] containing the moved and
+///    duplicated values.
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_movedup_pd(__m128d __a)
 {
-  return __builtin_shufflevector(__a, __a, 0, 0);
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
 }
 
 #define _MM_DENORMALS_ZERO_ON   (0x0040)
@@ -99,12 +266,40 @@
 #define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
 #define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
 
+/// \brief Establishes a linear address memory range to be monitored and puts
+///    the processor in the monitor event pending state. Data stored in the
+///    monitored address range causes the processor to exit the pending state.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MONITOR instruction.
+///
+/// \param __p
+///    The memory range to be monitored. The size of the range is determined by
+///    CPUID function 0000_0005h.
+/// \param __extensions
+///    Optional extensions for the monitoring state.
+/// \param __hints
+///    Optional hints for the monitoring state.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
 {
   __builtin_ia32_monitor((void *)__p, __extensions, __hints);
 }
 
+/// \brief Used with the MONITOR instruction to wait while the processor is in
+///    the monitor event pending state. Data stored in the monitored address
+///    range causes the processor to exit the pending state.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MWAIT instruction.
+///
+/// \param __extensions
+///    Optional extensions for the monitoring state, which may vary by
+///    processor.
+/// \param __hints
+///    Optional hints for the monitoring state, which may vary by processor.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_mwait(unsigned __extensions, unsigned __hints)
 {
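To make the new SSE3 documentation above concrete, here is a hypothetical usage sketch (main(), the sample values, and the -msse3 flag are assumptions, not part of this patch):

/* Illustrative sketch only -- not part of the prebuilt header. */
#include <x86intrin.h>
#include <stdio.h>

int main(void)
{
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);      /* a = {1, 2, 3, 4}     */
    __m128 b = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f);  /* b = {10, 20, 30, 40} */
    float r[4];

    /* _mm_addsub_ps: {1-10, 2+20, 3-30, 4+40} = {-9, 22, -27, 44} */
    _mm_storeu_ps(r, _mm_addsub_ps(a, b));
    printf("addsub: %g %g %g %g\n", r[0], r[1], r[2], r[3]);

    /* _mm_hadd_ps: {1+2, 3+4, 10+20, 30+40} = {3, 7, 30, 70} */
    _mm_storeu_ps(r, _mm_hadd_ps(a, b));
    printf("hadd:   %g %g %g %g\n", r[0], r[1], r[2], r[3]);
    return 0;
}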
diff --git a/renderscript/clang-include/popcntintrin.h b/renderscript/clang-include/popcntintrin.h
index 6fcda65..7e2f167 100644
--- a/renderscript/clang-include/popcntintrin.h
+++ b/renderscript/clang-include/popcntintrin.h
@@ -27,12 +27,32 @@
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
 
+/// \brief Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POPCNT instruction.
+///
+/// \param __A
+///    An unsigned 32-bit integer operand.
+/// \returns A 32-bit integer containing the number of bits with value 1 in the
+///    source operand.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_popcnt_u32(unsigned int __A)
 {
   return __builtin_popcount(__A);
 }
 
+/// \brief Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POPCNT instruction.
+///
+/// \param __A
+///    A signed 32-bit integer operand.
+/// \returns A 32-bit integer containing the number of bits with value 1 in the
+///    source operand.
 static __inline__ int __DEFAULT_FN_ATTRS
 _popcnt32(int __A)
 {
@@ -40,12 +60,32 @@
 }
 
 #ifdef __x86_64__
+/// \brief Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POPCNT instruction.
+///
+/// \param __A
+///    An unsigned 64-bit integer operand.
+/// \returns A 64-bit integer containing the number of bits with value 1 in the
+///    source operand.
 static __inline__ long long __DEFAULT_FN_ATTRS
 _mm_popcnt_u64(unsigned long long __A)
 {
   return __builtin_popcountll(__A);
 }
 
+/// \brief Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POPCNT instruction.
+///
+/// \param __A
+///    A signed 64-bit integer operand.
+/// \returns A 64-bit integer containing the number of bits with value 1 in the
+///    source operand.
 static __inline__ long long __DEFAULT_FN_ATTRS
 _popcnt64(long long __A)
 {
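A hypothetical usage sketch of the POPCNT intrinsics documented above (the sample values and the -mpopcnt flag are assumptions, not part of this patch):

/* Illustrative sketch only -- not part of the prebuilt header. */
#include <x86intrin.h>
#include <stdio.h>

int main(void)
{
    printf("%d\n", _mm_popcnt_u32(0xF0F0u));            /* prints 8  */
#ifdef __x86_64__
    printf("%lld\n", _mm_popcnt_u64(0xFFFFFFFFull));    /* prints 32 */
#endif
    return 0;
}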
diff --git a/renderscript/clang-include/smmintrin.h b/renderscript/clang-include/smmintrin.h
index 69ad07f..e48ab03 100644
--- a/renderscript/clang-include/smmintrin.h
+++ b/renderscript/clang-include/smmintrin.h
@@ -121,7 +121,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_mullo_epi32 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) ((__v4si)__V1 * (__v4si)__V2);
+  return (__m128i) ((__v4su)__V1 * (__v4su)__V2);
 }
 
 static __inline__  __m128i __DEFAULT_FN_ATTRS
@@ -220,16 +220,16 @@
 #define _mm_insert_epi8(X, I, N) (__extension__                           \
                                   ({ __v16qi __a = (__v16qi)(__m128i)(X); \
                                      __a[(N) & 15] = (I);                 \
-                                     __a;}))
+                                     (__m128i)__a;}))
 #define _mm_insert_epi32(X, I, N) (__extension__                         \
                                    ({ __v4si __a = (__v4si)(__m128i)(X); \
                                       __a[(N) & 3] = (I);                \
-                                      __a;}))
+                                      (__m128i)__a;}))
 #ifdef __x86_64__
 #define _mm_insert_epi64(X, I, N) (__extension__                         \
                                    ({ __v2di __a = (__v2di)(__m128i)(X); \
                                       __a[(N) & 1] = (I);                \
-                                      __a;}))
+                                      (__m128i)__a;}))
 #endif /* __x86_64__ */
 
 /* Extract int from packed integer array at index.  This returns the element
@@ -299,7 +299,6 @@
 {
   /* This function always performs a signed extension, but __v16qi is a char
      which may be signed or unsigned, so use __v16qs. */
-  typedef signed char __v16qs __attribute__((__vector_size__(16)));
   return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
 }
 
@@ -325,37 +324,37 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvtepu8_epi16(__m128i __V)
 {
-  return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V);
+  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvtepu8_epi32(__m128i __V)
 {
-  return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V);
+  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvtepu8_epi64(__m128i __V)
 {
-  return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V);
+  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvtepu16_epi32(__m128i __V)
 {
-  return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V);
+  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvtepu16_epi64(__m128i __V)
 {
-  return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V);
+  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvtepu32_epi64(__m128i __V)
 {
-  return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V);
+  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
 }
 
 /* SSE4 Pack with Unsigned Saturation.  */
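The zero-extension conversions rewritten above now go through __builtin_convertvector; their observable behaviour is unchanged. A hypothetical sketch of _mm_cvtepu8_epi16 (sample values and the -msse4.1 flag are assumptions, not part of this patch):

/* Illustrative sketch only -- not part of the prebuilt header. */
#include <x86intrin.h>
#include <stdio.h>

int main(void)
{
    __m128i bytes = _mm_set1_epi8((char)0xFF);   /* sixteen 0xFF bytes         */
    __m128i words = _mm_cvtepu8_epi16(bytes);    /* eight lanes of 0x00FF each */
    short out[8];
    _mm_storeu_si128((__m128i *)out, words);
    printf("%d\n", out[0]);                      /* prints 255 (zero-extended), not -1 */
    return 0;
}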
diff --git a/renderscript/clang-include/tbmintrin.h b/renderscript/clang-include/tbmintrin.h
index 785961c..1d0d746 100644
--- a/renderscript/clang-include/tbmintrin.h
+++ b/renderscript/clang-include/tbmintrin.h
@@ -36,57 +36,57 @@
                                            (unsigned int)(b)))
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blcfill_u32(unsigned int a)
+__blcfill_u32(unsigned int __a)
 {
-  return a & (a + 1);
+  return __a & (__a + 1);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blci_u32(unsigned int a)
+__blci_u32(unsigned int __a)
 {
-  return a | ~(a + 1);
+  return __a | ~(__a + 1);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blcic_u32(unsigned int a)
+__blcic_u32(unsigned int __a)
 {
-  return ~a & (a + 1);
+  return ~__a & (__a + 1);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blcmsk_u32(unsigned int a)
+__blcmsk_u32(unsigned int __a)
 {
-  return a ^ (a + 1);
+  return __a ^ (__a + 1);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blcs_u32(unsigned int a)
+__blcs_u32(unsigned int __a)
 {
-  return a | (a + 1);
+  return __a | (__a + 1);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsfill_u32(unsigned int a)
+__blsfill_u32(unsigned int __a)
 {
-  return a | (a - 1);
+  return __a | (__a - 1);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsic_u32(unsigned int a)
+__blsic_u32(unsigned int __a)
 {
-  return ~a | (a - 1);
+  return ~__a | (__a - 1);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__t1mskc_u32(unsigned int a)
+__t1mskc_u32(unsigned int __a)
 {
-  return ~a | (a + 1);
+  return ~__a | (__a + 1);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__tzmsk_u32(unsigned int a)
+__tzmsk_u32(unsigned int __a)
 {
-  return ~a & (a - 1);
+  return ~__a & (__a - 1);
 }
 
 #ifdef __x86_64__
@@ -95,57 +95,57 @@
                                                  (unsigned long long)(b)))
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blcfill_u64(unsigned long long a)
+__blcfill_u64(unsigned long long __a)
 {
-  return a & (a + 1);
+  return __a & (__a + 1);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blci_u64(unsigned long long a)
+__blci_u64(unsigned long long __a)
 {
-  return a | ~(a + 1);
+  return __a | ~(__a + 1);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blcic_u64(unsigned long long a)
+__blcic_u64(unsigned long long __a)
 {
-  return ~a & (a + 1);
+  return ~__a & (__a + 1);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blcmsk_u64(unsigned long long a)
+__blcmsk_u64(unsigned long long __a)
 {
-  return a ^ (a + 1);
+  return __a ^ (__a + 1);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blcs_u64(unsigned long long a)
+__blcs_u64(unsigned long long __a)
 {
-  return a | (a + 1);
+  return __a | (__a + 1);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsfill_u64(unsigned long long a)
+__blsfill_u64(unsigned long long __a)
 {
-  return a | (a - 1);
+  return __a | (__a - 1);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsic_u64(unsigned long long a)
+__blsic_u64(unsigned long long __a)
 {
-  return ~a | (a - 1);
+  return ~__a | (__a - 1);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__t1mskc_u64(unsigned long long a)
+__t1mskc_u64(unsigned long long __a)
 {
-  return ~a | (a + 1);
+  return ~__a | (__a + 1);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__tzmsk_u64(unsigned long long a)
+__tzmsk_u64(unsigned long long __a)
 {
-  return ~a & (a - 1);
+  return ~__a & (__a - 1);
 }
 #endif
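The TBM helpers above are plain bit arithmetic once their parameters are renamed to reserved identifiers. A hypothetical worked example (the sample values and the -mtbm flag are assumptions, not part of this patch; TBM itself is only available on certain AMD CPUs):

/* Illustrative sketch only -- not part of the prebuilt header. */
#include <x86intrin.h>
#include <stdio.h>

int main(void)
{
    printf("%#x\n", __blcfill_u32(0x57u));   /* 0x57 & 0x58 == 0x50 */
    printf("%#x\n", __blsfill_u32(0x58u));   /* 0x58 | 0x57 == 0x5f */
    return 0;
}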
 
diff --git a/renderscript/clang-include/tmmintrin.h b/renderscript/clang-include/tmmintrin.h
index 0002890..a72796b 100644
--- a/renderscript/clang-include/tmmintrin.h
+++ b/renderscript/clang-include/tmmintrin.h
@@ -29,187 +29,739 @@
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3")))
 
+/// \brief Computes the absolute value of each of the packed 8-bit signed
+///    integers in the source operand and stores the 8-bit unsigned integer
+///    results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PABSB instruction.
+///
+/// \param __a
+///    A 64-bit vector of [8 x i8].
+/// \returns A 64-bit integer vector containing the absolute values of the
+///    elements in the operand.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_abs_pi8(__m64 __a)
 {
     return (__m64)__builtin_ia32_pabsb((__v8qi)__a);
 }
 
+/// \brief Computes the absolute value of each of the packed 8-bit signed
+///    integers in the source operand and stores the 8-bit unsigned integer
+///    results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSB instruction.
+///
+/// \param __a
+///    A 128-bit vector of [16 x i8].
+/// \returns A 128-bit integer vector containing the absolute values of the
+///    elements in the operand.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi8(__m128i __a)
 {
     return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
 }
 
+/// \brief Computes the absolute value of each of the packed 16-bit signed
+///    integers in the source operand and stores the 16-bit unsigned integer
+///    results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PABSW instruction.
+///
+/// \param __a
+///    A 64-bit vector of [4 x i16].
+/// \returns A 64-bit integer vector containing the absolute values of the
+///    elements in the operand.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_abs_pi16(__m64 __a)
 {
     return (__m64)__builtin_ia32_pabsw((__v4hi)__a);
 }
 
+/// \brief Computes the absolute value of each of the packed 16-bit signed
+///    integers in the source operand and stores the 16-bit unsigned integer
+///    results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSW instruction.
+///
+/// \param __a
+///    A 128-bit vector of [8 x i16].
+/// \returns A 128-bit integer vector containing the absolute values of the
+///    elements in the operand.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi16(__m128i __a)
 {
     return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
 }
 
+/// \brief Computes the absolute value of each of the packed 32-bit signed
+///    integers in the source operand and stores the 32-bit unsigned integer
+///    results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PABSD instruction.
+///
+/// \param __a
+///    A 64-bit vector of [2 x i32].
+/// \returns A 64-bit integer vector containing the absolute values of the
+///    elements in the operand.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_abs_pi32(__m64 __a)
 {
     return (__m64)__builtin_ia32_pabsd((__v2si)__a);
 }
 
+/// \brief Computes the absolute value of each of the packed 32-bit signed
+///    integers in the source operand and stores the 32-bit unsigned integer
+///    results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x i32].
+/// \returns A 128-bit integer vector containing the absolute values of the
+///    elements in the operand.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi32(__m128i __a)
 {
     return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
 }
 
+/// \brief Concatenates the two 128-bit integer vector operands, and
+///    right-shifts the result by the number of bytes specified in the immediate
+///    operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_alignr_epi8(__m128i a, __m128i b, const int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c PALIGNR instruction.
+///
+/// \param a
+///    A 128-bit vector of [16 x i8] containing one of the source operands.
+/// \param b
+///    A 128-bit vector of [16 x i8] containing one of the source operands.
+/// \param n
+///    An immediate operand specifying how many bytes to right-shift the result.
+/// \returns A 128-bit integer vector containing the concatenated right-shifted
+///    value.
 #define _mm_alignr_epi8(a, b, n) __extension__ ({ \
   (__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \
                                      (__v16qi)(__m128i)(b), (n)); })
 
+/// \brief Concatenates the two 64-bit integer vector operands, and right-shifts
+///    the result by the number of bytes specified in the immediate operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m64 _mm_alignr_pi8(__m64 a, __m64 b, const int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c PALIGNR instruction.
+///
+/// \param a
+///    A 64-bit vector of [8 x i8] containing one of the source operands.
+/// \param b
+///    A 64-bit vector of [8 x i8] containing one of the source operands.
+/// \param n
+///    An immediate operand specifying how many bytes to right-shift the result.
+/// \returns A 64-bit integer vector containing the concatenated right-shifted
+///    value.
 #define _mm_alignr_pi8(a, b, n) __extension__ ({ \
   (__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n)); })
 
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+///    128-bit vectors of [8 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDW instruction.
+///
+/// \param __a
+///    A 128-bit vector of [8 x i16] containing one of the source operands. The
+///    horizontal sums of the values are stored in the lower bits of the
+///    destination.
+/// \param __b
+///    A 128-bit vector of [8 x i16] containing one of the source operands. The
+///    horizontal sums of the values are stored in the upper bits of the
+///    destination.
+/// \returns A 128-bit vector of [8 x i16] containing the horizontal sums of
+///    both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_hadd_epi16(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b);
 }
 
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+///    128-bit vectors of [4 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x i32] containing one of the source operands. The
+///    horizontal sums of the values are stored in the lower bits of the
+///    destination.
+/// \param __b
+///    A 128-bit vector of [4 x i32] containing one of the source operands. The
+///    horizontal sums of the values are stored in the upper bits of the
+///    destination.
+/// \returns A 128-bit vector of [4 x i32] containing the horizontal sums of
+///    both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_hadd_epi32(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b);
 }
 
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+///    64-bit vectors of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHADDW instruction.
+///
+/// \param __a
+///    A 64-bit vector of [4 x i16] containing one of the source operands. The
+///    horizontal sums of the values are stored in the lower bits of the
+///    destination.
+/// \param __b
+///    A 64-bit vector of [4 x i16] containing one of the source operands. The
+///    horizontal sums of the values are stored in the upper bits of the
+///    destination.
+/// \returns A 64-bit vector of [4 x i16] containing the horizontal sums of both
+///    operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_hadd_pi16(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b);
 }
 
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+///    64-bit vectors of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHADDD instruction.
+///
+/// \param __a
+///    A 64-bit vector of [2 x i32] containing one of the source operands. The
+///    horizontal sums of the values are stored in the lower bits of the
+///    destination.
+/// \param __b
+///    A 64-bit vector of [2 x i32] containing one of the source operands. The
+///    horizontal sums of the values are stored in the upper bits of the
+///    destination.
+/// \returns A 64-bit vector of [2 x i32] containing the horizontal sums of both
+///    operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_hadd_pi32(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b);
 }
 
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+///    128-bit vectors of [8 x i16]. Positive sums greater than 7FFFh are
+///    saturated to 7FFFh. Negative sums less than 8000h are saturated to 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDSW instruction.
+///
+/// \param __a
+///    A 128-bit vector of [8 x i16] containing one of the source operands. The
+///    horizontal sums of the values are stored in the lower bits of the
+///    destination.
+/// \param __b
+///    A 128-bit vector of [8 x i16] containing one of the source operands. The
+///    horizontal sums of the values are stored in the upper bits of the
+///    destination.
+/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated
+///    sums of both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_hadds_epi16(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);
 }
 
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+///    64-bit vectors of [4 x i16]. Positive sums greater than 7FFFh are
+///    saturated to 7FFFh. Negative sums less than 8000h are saturated to 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHADDSW instruction.
+///
+/// \param __a
+///    A 64-bit vector of [4 x i16] containing one of the source operands. The
+///    horizontal sums of the values are stored in the lower bits of the
+///    destination.
+/// \param __b
+///    A 64-bit vector of [4 x i16] containing one of the source operands. The
+///    horizontal sums of the values are stored in the upper bits of the
+///    destination.
+/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated
+///    sums of both operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_hadds_pi16(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_phaddsw((__v4hi)__a, (__v4hi)__b);
 }
 
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+///    packed 128-bit vectors of [8 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBW instruction.
+///
+/// \param __a
+///    A 128-bit vector of [8 x i16] containing one of the source operands. The
+///    horizontal differences between the values are stored in the lower bits of
+///    the destination.
+/// \param __b
+///    A 128-bit vector of [8 x i16] containing one of the source operands. The
+///    horizontal differences between the values are stored in the upper bits of
+///    the destination.
+/// \returns A 128-bit vector of [8 x i16] containing the horizontal differences
+///    of both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_hsub_epi16(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b);
 }
 
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+///    packed 128-bit vectors of [4 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x i32] containing one of the source operands. The
+///    horizontal differences between the values are stored in the lower bits of
+///    the destination.
+/// \param __b
+///    A 128-bit vector of [4 x i32] containing one of the source operands. The
+///    horizontal differences between the values are stored in the upper bits of
+///    the destination.
+/// \returns A 128-bit vector of [4 x i32] containing the horizontal differences
+///    of both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_hsub_epi32(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b);
 }
 
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+///    packed 64-bit vectors of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHSUBW instruction.
+///
+/// \param __a
+///    A 64-bit vector of [4 x i16] containing one of the source operands. The
+///    horizontal differences between the values are stored in the lower bits of
+///    the destination.
+/// \param __b
+///    A 64-bit vector of [4 x i16] containing one of the source operands. The
+///    horizontal differences between the values are stored in the upper bits of
+///    the destination.
+/// \returns A 64-bit vector of [4 x i16] containing the horizontal differences
+///    of both operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_hsub_pi16(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_phsubw((__v4hi)__a, (__v4hi)__b);
 }
 
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+///    packed 64-bit vectors of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHSUBD instruction.
+///
+/// \param __a
+///    A 64-bit vector of [2 x i32] containing one of the source operands. The
+///    horizontal differences between the values are stored in the lower bits of
+///    the destination.
+/// \param __b
+///    A 64-bit vector of [2 x i32] containing one of the source operands. The
+///    horizontal differences between the values are stored in the upper bits of
+///    the destination.
+/// \returns A 64-bit vector of [2 x i32] containing the horizontal differences
+///    of both operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_hsub_pi32(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b);
 }
 
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+///    packed 128-bit vectors of [8 x i16]. Positive differences greater than
+///    7FFFh are saturated to 7FFFh. Negative differences less than 8000h are
+///    saturated to 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBSW instruction.
+///
+/// \param __a
+///    A 128-bit vector of [8 x i16] containing one of the source operands. The
+///    horizontal differences between the values are stored in the lower bits of
+///    the destination.
+/// \param __b
+///    A 128-bit vector of [8 x i16] containing one of the source operands. The
+///    horizontal differences between the values are stored in the upper bits of
+///    the destination.
+/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated
+///    differences of both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_hsubs_epi16(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);
 }
 
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+///    packed 64-bit vectors of [4 x i16]. Positive differences greater than
+///    7FFFh are saturated to 7FFFh. Negative differences less than 8000h are
+///    saturated to 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHSUBSW instruction.
+///
+/// \param __a
+///    A 64-bit vector of [4 x i16] containing one of the source operands. The
+///    horizontal differences between the values are stored in the lower bits of
+///    the destination.
+/// \param __b
+///    A 64-bit vector of [4 x i16] containing one of the source operands. The
+///    horizontal differences between the values are stored in the upper bits of
+///    the destination.
+/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated
+///    differences of both operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_hsubs_pi16(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_phsubsw((__v4hi)__a, (__v4hi)__b);
 }
 
+/// \brief Multiplies corresponding pairs of packed 8-bit unsigned integer
+///    values contained in the first source operand and packed 8-bit signed
+///    integer values contained in the second source operand, adds pairs of
+///    contiguous products with signed saturation, and writes the 16-bit sums to
+///    the corresponding bits in the destination. For example, bits [7:0] of
+///    both operands are multiplied, bits [15:8] of both operands are
+///    multiplied, and the sum of both results is written to bits [15:0] of the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPMADDUBSW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the first source operand.
+/// \param __b
+///    A 128-bit integer vector containing the second source operand.
+/// \returns A 128-bit integer vector containing the sums of products of both
+///    operands:
+///    R0 := (__a0 * __b0) + (__a1 * __b1)
+///    R1 := (__a2 * __b2) + (__a3 * __b3)
+///    R2 := (__a4 * __b4) + (__a5 * __b5)
+///    R3 := (__a6 * __b6) + (__a7 * __b7)
+///    R4 := (__a8 * __b8) + (__a9 * __b9)
+///    R5 := (__a10 * __b10) + (__a11 * __b11)
+///    R6 := (__a12 * __b12) + (__a13 * __b13)
+///    R7 := (__a14 * __b14) + (__a15 * __b15)
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_maddubs_epi16(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__a, (__v16qi)__b);
 }
 
+/// \brief Multiplies corresponding pairs of packed 8-bit unsigned integer
+///    values contained in the first source operand and packed 8-bit signed
+///    integer values contained in the second source operand, adds pairs of
+///    contiguous products with signed saturation, and writes the 16-bit sums to
+///    the corresponding bits in the destination. For example, bits [7:0] of
+///    both operands are multiplied, bits [15:8] of both operands are
+///    multiplied, and the sum of both results is written to bits [15:0] of the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMADDUBSW instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing the first source operand.
+/// \param __b
+///    A 64-bit integer vector containing the second source operand.
+/// \returns A 64-bit integer vector containing the sums of products of both
+///    operands:
+///    R0 := (__a0 * __b0) + (__a1 * __b1)
+///    R1 := (__a2 * __b2) + (__a3 * __b3)
+///    R2 := (__a4 * __b4) + (__a5 * __b5)
+///    R3 := (__a6 * __b6) + (__a7 * __b7)
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_maddubs_pi16(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_pmaddubsw((__v8qi)__a, (__v8qi)__b);
 }
 
+/// \brief Multiplies packed 16-bit signed integer values, truncates the 32-bit
+///    products to the 18 most significant bits by right-shifting, rounds the
+///    truncated value by adding 1, and writes bits [16:1] to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHRSW instruction.
+///
+/// \param __a
+///    A 128-bit vector of [8 x i16] containing one of the source operands.
+/// \param __b
+///    A 128-bit vector of [8 x i16] containing one of the source operands.
+/// \returns A 128-bit vector of [8 x i16] containing the rounded and scaled
+///    products of both operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_mulhrs_epi16(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
 }
 
+/// \brief Multiplies packed 16-bit signed integer values, truncates the 32-bit
+///    products to the 18 most significant bits by right-shifting, rounds the
+///    truncated value by adding 1, and writes bits [16:1] to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMULHRSW instruction.
+///
+/// \param __a
+///    A 64-bit vector of [4 x i16] containing one of the source operands.
+/// \param __b
+///    A 64-bit vector of [4 x i16] containing one of the source operands.
+/// \returns A 64-bit vector of [4 x i16] containing the rounded and scaled
+///    products of both operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_mulhrs_pi16(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__a, (__v4hi)__b);
 }
 
+/// \brief Copies the 8-bit integers from a 128-bit integer vector to the
+///    destination or clears 8-bit values in the destination, as specified by
+///    the second source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSHUFB instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the values to be copied.
+/// \param __b
+///    A 128-bit integer vector containing control bytes corresponding to
+///    positions in the destination:
+///    Bit 7:
+///    1: Clear the corresponding byte in the destination.
+///    0: Copy the selected source byte to the corresponding byte in the
+///    destination.
+///    Bits [6:4] Reserved.
+///    Bits [3:0] select the source byte to be copied.
+/// \returns A 128-bit integer vector containing the copied or cleared values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_shuffle_epi8(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_pshufb128((__v16qi)__a, (__v16qi)__b);
 }
 
+/// \brief Copies the 8-bit integers from a 64-bit integer vector to the
+///    destination or clears 8-bit values in the destination, as specified by
+///    the second source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSHUFB instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing the values to be copied.
+/// \param __b
+///    A 64-bit integer vector containing control bytes corresponding to
+///    positions in the destination:
+///    Bit 7:
+///    1: Clear the corresponding byte in the destination.
+///    0: Copy the selected source byte to the corresponding byte in the
+///    destination.
+///    Bits [3:0] select the source byte to be copied.
+/// \returns A 64-bit integer vector containing the copied or cleared values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_shuffle_pi8(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_pshufb((__v8qi)__a, (__v8qi)__b);
 }
 
+/// \brief For each 8-bit integer in the first source operand, perform one of
+///    the following actions as specified by the second source operand: If the
+///    byte in the second source is negative, calculate the two's complement of
+///    the corresponding byte in the first source, and write that value to the
+///    destination. If the byte in the second source is positive, copy the
+///    corresponding byte from the first source to the destination. If the byte
+///    in the second source is zero, clear the corresponding byte in the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGNB instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the values to be copied.
+/// \param __b
+///    A 128-bit integer vector containing control bytes corresponding to
+///    positions in the destination.
+/// \returns A 128-bit integer vector containing the resultant values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sign_epi8(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);
 }
 
+/// \brief For each 16-bit integer in the first source operand, perform one of
+///    the following actions as specified by the second source operand: If the
+///    word in the second source is negative, calculate the two's complement of
+///    the corresponding word in the first source, and write that value to the
+///    destination. If the word in the second source is positive, copy the
+///    corresponding word from the first source to the destination. If the word
+///    in the second source is zero, clear the corresponding word in the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGNW instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the values to be copied.
+/// \param __b
+///    A 128-bit integer vector containing control words corresponding to
+///    positions in the destination.
+/// \returns A 128-bit integer vector containing the resultant values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sign_epi16(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);
 }
 
+/// \brief For each 32-bit integer in the first source operand, perform one of
+///    the following actions as specified by the second source operand: If the
+///    doubleword in the second source is negative, calculate the two's
+///    complement of the corresponding doubleword in the first source, and
+///    write that value to the destination. If the doubleword in the second
+///    source is positive, copy the corresponding doubleword from the first
+///    source to the destination. If the doubleword in the second source is
+///    zero, clear the corresponding doubleword in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGND instruction.
+///
+/// \param __a
+///    A 128-bit integer vector containing the values to be copied.
+/// \param __b
+///    A 128-bit integer vector containing control doublewords corresponding to
+///    positions in the destination.
+/// \returns A 128-bit integer vector containing the resultant values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_sign_epi32(__m128i __a, __m128i __b)
 {
     return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);
 }
 
+/// \brief For each 8-bit integer in the first source operand, perform one of
+///    the following actions as specified by the second source operand: If the
+///    byte in the second source is negative, calculate the two's complement of
+///    the corresponding byte in the first source, and write that value to the
+///    destination. If the byte in the second source is positive, copy the
+///    corresponding byte from the first source to the destination. If the byte
+///    in the second source is zero, clear the corresponding byte in the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSIGNB instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing the values to be copied.
+/// \param __b
+///    A 64-bit integer vector containing control bytes corresponding to
+///    positions in the destination.
+/// \returns A 64-bit integer vector containing the resultant values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sign_pi8(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_psignb((__v8qi)__a, (__v8qi)__b);
 }
 
+/// \brief For each 16-bit integer in the first source operand, perform one of
+///    the following actions as specified by the second source operand: If the
+///    word in the second source is negative, calculate the two's complement of
+///    the corresponding word in the first source, and write that value to the
+///    destination. If the word in the second source is positive, copy the
+///    corresponding word from the first source to the destination. If the word
+///    in the second source is zero, clear the corresponding word in the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSIGNW instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing the values to be copied.
+/// \param __b
+///    A 64-bit integer vector containing control words corresponding to
+///    positions in the destination.
+/// \returns A 64-bit integer vector containing the resultant values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sign_pi16(__m64 __a, __m64 __b)
 {
     return (__m64)__builtin_ia32_psignw((__v4hi)__a, (__v4hi)__b);
 }
 
+/// \brief For each 32-bit integer in the first source operand, perform one of
+///    the following actions as specified by the second source operand: If the
+///    doubleword in the second source is negative, calculate the two's
+///    complement of the corresponding doubleword in the first source, and
+///    write that value to the destination. If the doubleword in the second
+///    source is positive, copy the corresponding doubleword from the first
+///    source to the destination. If the doubleword in the second source is
+///    zero, clear the corresponding doubleword in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSIGND instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing the values to be copied.
+/// \param __b
+///    A 64-bit integer vector containing two control doublewords corresponding
+///    to positions in the destination.
+/// \returns A 64-bit integer vector containing the resultant values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sign_pi32(__m64 __a, __m64 __b)
 {
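A hypothetical sketch of two of the SSSE3 intrinsics documented above, byte shuffling and packed absolute value (the sample values and the -mssse3 flag are assumptions, not part of this patch):

/* Illustrative sketch only -- not part of the prebuilt header. */
#include <x86intrin.h>
#include <stdio.h>

int main(void)
{
    __m128i v   = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                8, 9, 10, 11, 12, 13, 14, 15);
    __m128i rev = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                                7, 6, 5, 4, 3, 2, 1, 0);
    unsigned char out[16];

    /* _mm_shuffle_epi8 picks source bytes by index: this reverses them. */
    _mm_storeu_si128((__m128i *)out, _mm_shuffle_epi8(v, rev));
    printf("first byte after shuffle: %d\n", out[0]);   /* prints 15 */

    /* _mm_abs_epi8 turns every (signed) -5 byte into 5. */
    _mm_storeu_si128((__m128i *)out, _mm_abs_epi8(_mm_set1_epi8(-5)));
    printf("abs(-5) byte: %d\n", out[0]);               /* prints 5  */
    return 0;
}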
diff --git a/renderscript/clang-include/unwind.h b/renderscript/clang-include/unwind.h
index 303d792..4f74a34 100644
--- a/renderscript/clang-include/unwind.h
+++ b/renderscript/clang-include/unwind.h
@@ -79,6 +79,10 @@
 struct _Unwind_Exception;
 typedef enum {
   _URC_NO_REASON = 0,
+#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
+    !defined(__ARM_DWARF_EH__)
+  _URC_OK = 0, /* used by ARM EHABI */
+#endif
   _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
 
   _URC_FATAL_PHASE2_ERROR = 2,
@@ -88,7 +92,11 @@
   _URC_END_OF_STACK = 5,
   _URC_HANDLER_FOUND = 6,
   _URC_INSTALL_CONTEXT = 7,
-  _URC_CONTINUE_UNWIND = 8
+  _URC_CONTINUE_UNWIND = 8,
+#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
+    !defined(__ARM_DWARF_EH__)
+  _URC_FAILURE = 9 /* used by ARM EHABI */
+#endif
 } _Unwind_Reason_Code;
 
 typedef enum {
@@ -150,6 +158,15 @@
   _UVRSR_FAILED = 2
 } _Unwind_VRS_Result;
 
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__ARM_DWARF_EH__)
+typedef uint32_t _Unwind_State;
+#define _US_VIRTUAL_UNWIND_FRAME  ((_Unwind_State)0)
+#define _US_UNWIND_FRAME_STARTING ((_Unwind_State)1)
+#define _US_UNWIND_FRAME_RESUME   ((_Unwind_State)2)
+#define _US_ACTION_MASK           ((_Unwind_State)3)
+#define _US_FORCE_UNWIND          ((_Unwind_State)8)
+#endif
+
 _Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context,
   _Unwind_VRS_RegClass __regclass,
   uint32_t __regno,
diff --git a/renderscript/clang-include/x86intrin.h b/renderscript/clang-include/x86intrin.h
index 4d8077e..81a404f 100644
--- a/renderscript/clang-include/x86intrin.h
+++ b/renderscript/clang-include/x86intrin.h
@@ -28,29 +28,57 @@
 
 #include <immintrin.h>
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__3dNOW__)
 #include <mm3dnow.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
 #include <bmiintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
 #include <bmi2intrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
 #include <lzcntintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__)
 #include <popcntintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__)
 #include <rdseedintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PRFCHW__)
 #include <prfchwintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE4A__)
 #include <ammintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA4__)
 #include <fma4intrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XOP__)
 #include <xopintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__TBM__)
 #include <tbmintrin.h>
+#endif
 
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
 #include <f16cintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MWAITX__)
+#include <mwaitxintrin.h>
+#endif
 
 /* FIXME: LWP */
 
diff --git a/renderscript/clang-include/xmmintrin.h b/renderscript/clang-include/xmmintrin.h
index ae0b2cd..3110e8b 100644
--- a/renderscript/clang-include/xmmintrin.h
+++ b/renderscript/clang-include/xmmintrin.h
@@ -30,6 +30,9 @@
 typedef float __v4sf __attribute__((__vector_size__(16)));
 typedef float __m128 __attribute__((__vector_size__(16)));
 
+/* Unsigned types */
+typedef unsigned int __v4su __attribute__((__vector_size__(16)));
+
 /* This header should only be included in a hosted environment as it depends on
  * a standard library to provide allocation routines. */
 #if __STDC_HOSTED__
@@ -39,6 +42,21 @@
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse")))
 
+/// \brief Adds the 32-bit float values in the low-order bits of the operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDSS / ADDSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The lower 32 bits of this operand are used in the calculation.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The lower 32 bits of this operand are used in the calculation.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the sum
+///    of the lower 32 bits of both operands. The upper 96 bits are copied from
+///    the upper 96 bits of the first source operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_add_ss(__m128 __a, __m128 __b)
 {
@@ -46,12 +64,41 @@
   return __a;
 }
 
+/// \brief Adds two 128-bit vectors of [4 x float], and returns the results of
+///    the addition.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDPS / ADDPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the sums of both
+///    operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_add_ps(__m128 __a, __m128 __b)
 {
-  return __a + __b;
+  return (__m128)((__v4sf)__a + (__v4sf)__b);
 }
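/* Editor-added illustrative sketch (not part of this header): _mm_add_ss adds
 * only the low lanes, while _mm_add_ps adds all four.  Assumes the header is
 * reached via <x86intrin.h> as documented above. */
static float __example_add_low(float __x, float __y) {
  __m128 __a = _mm_set_ss(__x);                /* low lane = __x, upper lanes = 0 */
  __m128 __b = _mm_set_ss(__y);
  return _mm_cvtss_f32(_mm_add_ss(__a, __b));  /* extract the summed low lane */
}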
 
+/// \brief Subtracts the 32-bit float value in the low-order bits of the second
+///    operand from the corresponding value in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSUBSS / SUBSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the minuend. The lower 32 bits
+///    of this operand are used in the calculation.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing the subtrahend. The lower 32
+///    bits of this operand are used in the calculation.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+///    difference of the lower 32 bits of both operands. The upper 96 bits are
+///    copied from the upper 96 bits of the first source operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_sub_ss(__m128 __a, __m128 __b)
 {
@@ -59,12 +106,42 @@
   return __a;
 }
 
+/// \brief Subtracts each of the values of the second operand from the first
+///    operand, both of which are 128-bit vectors of [4 x float], and returns
+///    the results of the subtraction.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSUBPS / SUBPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the minuend.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing the subtrahend.
+/// \returns A 128-bit vector of [4 x float] containing the differences between
+///    both operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_sub_ps(__m128 __a, __m128 __b)
 {
-  return __a - __b;
+  return (__m128)((__v4sf)__a - (__v4sf)__b);
 }
 
+/// \brief Multiplies two 32-bit float values in the low-order bits of the
+///    operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMULSS / MULSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The lower 32 bits of this operand are used in the calculation.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The lower 32 bits of this operand are used in the calculation.
+/// \returns A 128-bit vector of [4 x float] containing the product of the lower
+///    32 bits of both operands. The upper 96 bits are copied from the upper 96
+///    bits of the first source operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mul_ss(__m128 __a, __m128 __b)
 {
@@ -72,12 +149,41 @@
   return __a;
 }
 
+/// \brief Multiplies two 128-bit vectors of [4 x float] and returns the
+///    results of the multiplication.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMULPS / MULPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the products of both
+///    operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mul_ps(__m128 __a, __m128 __b)
 {
-  return __a * __b;
+  return (__m128)((__v4sf)__a * (__v4sf)__b);
 }
 
+/// \brief Divides the value in the low-order 32 bits of the first operand by
+///    the corresponding value in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VDIVSS / DIVSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the dividend. The lower 32
+///    bits of this operand are used in the calculation.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing the divisor. The lower 32 bits
+///    of this operand are used in the calculation.
+/// \returns A 128-bit vector of [4 x float] containing the quotients of the
+///    lower 32 bits of both operands. The upper 96 bits are copied from the
+///    upper 96 bits of the first source operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_div_ss(__m128 __a, __m128 __b)
 {
@@ -85,329 +191,1091 @@
   return __a;
 }
 
+/// \brief Divides two 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VDIVPS / DIVPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the dividend.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing the divisor.
+/// \returns A 128-bit vector of [4 x float] containing the quotients of both
+///    operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_div_ps(__m128 __a, __m128 __b)
 {
-  return __a / __b;
+  return (__m128)((__v4sf)__a / (__v4sf)__b);
 }
 
+/// \brief Calculates the square root of the value stored in the low-order bits
+///    of a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSQRTSS / SQRTSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the calculation.
+/// \returns A 128-bit vector of [4 x float] containing the square root of the
+///    value in the low-order bits of the operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_sqrt_ss(__m128 __a)
 {
-  __m128 __c = __builtin_ia32_sqrtss(__a);
+  __m128 __c = __builtin_ia32_sqrtss((__v4sf)__a);
   return (__m128) { __c[0], __a[1], __a[2], __a[3] };
 }
 
+/// \brief Calculates the square roots of the values stored in a 128-bit vector
+///    of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSQRTPS / SQRTPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the square roots of the
+///    values in the operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_sqrt_ps(__m128 __a)
 {
-  return __builtin_ia32_sqrtps(__a);
+  return __builtin_ia32_sqrtps((__v4sf)__a);
 }
 
+/// \brief Calculates the approximate reciprocal of the value stored in the
+///    low-order bits of a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRCPSS / RCPSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the calculation.
+/// \returns A 128-bit vector of [4 x float] containing the approximate
+///    reciprocal of the value in the low-order bits of the operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_rcp_ss(__m128 __a)
 {
-  __m128 __c = __builtin_ia32_rcpss(__a);
+  __m128 __c = __builtin_ia32_rcpss((__v4sf)__a);
   return (__m128) { __c[0], __a[1], __a[2], __a[3] };
 }
 
+/// \brief Calculates the approximate reciprocals of the values stored in a
+///    128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRCPPS / RCPPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the approximate
+///    reciprocals of the values in the operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_rcp_ps(__m128 __a)
 {
-  return __builtin_ia32_rcpps(__a);
+  return __builtin_ia32_rcpps((__v4sf)__a);
 }
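/* Editor-added illustrative sketch (not part of this header): _mm_rcp_ps
 * returns only an approximate reciprocal, so callers commonly refine it with
 * one Newton-Raphson step, r' = r * (2 - a*r).  Shown as an assumption about
 * typical usage, not as part of the intrinsic's contract. */
static __m128 __example_rcp_refined(__m128 __a) {
  __m128 __r = _mm_rcp_ps(__a);
  return _mm_mul_ps(__r, _mm_sub_ps(_mm_set1_ps(2.0f), _mm_mul_ps(__a, __r)));
}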
 
+/// \brief Calculates the approximate reciprocal of the square root of the value
+///    stored in the low-order bits of a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRSQRTSS / RSQRTSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the calculation.
+/// \returns A 128-bit vector of [4 x float] containing the approximate
+///    reciprocal of the square root of the value in the low-order bits of the
+///    operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_rsqrt_ss(__m128 __a)
 {
-  __m128 __c = __builtin_ia32_rsqrtss(__a);
+  __m128 __c = __builtin_ia32_rsqrtss((__v4sf)__a);
   return (__m128) { __c[0], __a[1], __a[2], __a[3] };
 }
 
+/// \brief Calculates the approximate reciprocals of the square roots of the
+///    values stored in a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRSQRTPS / RSQRTPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the approximate
+///    reciprocals of the square roots of the values in the operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_rsqrt_ps(__m128 __a)
 {
-  return __builtin_ia32_rsqrtps(__a);
+  return __builtin_ia32_rsqrtps((__v4sf)__a);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands and returns the lesser value in the low-order bits of the
+///    vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMINSS / MINSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+///    minimum value between both operands. The upper 96 bits are copied from
+///    the upper 96 bits of the first source operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_min_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_minss(__a, __b);
+  return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 128-bit vectors of [4 x float] and returns the
+///    lesser of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMINPS / MINPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands.
+/// \returns A 128-bit vector of [4 x float] containing the minimum values
+///    between both operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_min_ps(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_minps(__a, __b);
+  return __builtin_ia32_minps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands and returns the greater value in the low-order bits of
+///    a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMAXSS / MAXSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+///    maximum value between both operands. The upper 96 bits are copied from
+///    the upper 96 bits of the first source operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_max_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_maxss(__a, __b);
+  return __builtin_ia32_maxss((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 128-bit vectors of [4 x float] and returns the greater
+///    of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMAXPS / MAXPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands.
+/// \returns A 128-bit vector of [4 x float] containing the maximum values
+///    between both operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_max_ps(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_maxps(__a, __b);
+  return __builtin_ia32_maxps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Performs a bitwise AND of two 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDPS / ANDPS instructions.
+///
+/// \param __a
+///    A 128-bit vector containing one of the source operands.
+/// \param __b
+///    A 128-bit vector containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the
+///    values between both operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_and_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)((__v4si)__a & (__v4si)__b);
+  return (__m128)((__v4su)__a & (__v4su)__b);
 }
 
+/// \brief Performs a bitwise AND of two 128-bit vectors of [4 x float], using
+///    the one's complement of the values contained in the first source
+///    operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDNPS / ANDNPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the first source operand. The
+///    one's complement of this value is used in the bitwise AND.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing the second source operand.
+/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the
+///    one's complement of the first operand and the values in the second
+///    operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_andnot_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)(~(__v4si)__a & (__v4si)__b);
+  return (__m128)(~(__v4su)__a & (__v4su)__b);
 }
 
+/// \brief Performs a bitwise OR of two 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VORPS / ORPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the bitwise OR of the
+///    values between both operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_or_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)((__v4si)__a | (__v4si)__b);
+  return (__m128)((__v4su)__a | (__v4su)__b);
 }
 
+/// \brief Performs a bitwise exclusive OR of two 128-bit vectors of
+///    [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VXORPS / XORPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the bitwise exclusive OR
+///    of the values between both operands.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_xor_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)((__v4si)__a ^ (__v4si)__b);
+  return (__m128)((__v4su)__a ^ (__v4su)__b);
 }
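/* Editor-added illustrative sketch (not part of this header): the bitwise
 * helpers above combine with the compare intrinsics below to build a
 * branchless per-lane select, result = (mask & a) | (~mask & b). */
static __m128 __example_select_ps(__m128 __mask, __m128 __a, __m128 __b) {
  return _mm_or_ps(_mm_and_ps(__mask, __a), _mm_andnot_ps(__mask, __b));
}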
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands for equality and returns the result of the comparison in the
+///    low-order bits of a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPEQSS / CMPEQSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpeq_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpeqss(__a, __b);
+  return (__m128)__builtin_ia32_cmpeqss((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] for equality.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPEQPS / CMPEQPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpeq_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpeqps(__a, __b);
+  return (__m128)__builtin_ia32_cmpeqps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the value in the first operand is less than the
+///    corresponding value in the second operand and returns the result of the
+///    comparison in the low-order bits of a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLTSS / CMPLTSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmplt_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpltss(__a, __b);
+  return (__m128)__builtin_ia32_cmpltss((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] to determine if the values in the first
+///    operand are less than those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLTPS / CMPLTPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmplt_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpltps(__a, __b);
+  return (__m128)__builtin_ia32_cmpltps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the value in the first operand is less than or
+///    equal to the corresponding value in the second operand and returns the
+///    result of the comparison in the low-order bits of a vector of
+///    [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLESS / CMPLESS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmple_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpless(__a, __b);
+  return (__m128)__builtin_ia32_cmpless((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] to determine if the values in the first
+///    operand are less than or equal to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLEPS / CMPLEPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmple_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpleps(__a, __b);
+  return (__m128)__builtin_ia32_cmpleps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the value in the first operand is greater than
+///    the corresponding value in the second operand and returns the result of
+///    the comparison in the low-order bits of a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLTSS / CMPLTSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpgt_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_shufflevector(__a,
-                                         __builtin_ia32_cmpltss(__b, __a),
+  return (__m128)__builtin_shufflevector((__v4sf)__a,
+                                         (__v4sf)__builtin_ia32_cmpltss((__v4sf)__b, (__v4sf)__a),
                                          4, 1, 2, 3);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] to determine if the values in the first
+///    operand are greater than those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLTPS / CMPLTPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpgt_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpltps(__b, __a);
+  return (__m128)__builtin_ia32_cmpltps((__v4sf)__b, (__v4sf)__a);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the value in the first operand is greater than
+///    or equal to the corresponding value in the second operand and returns
+///    the result of the comparison in the low-order bits of a vector of
+///    [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLESS / CMPLESS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpge_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_shufflevector(__a,
-                                         __builtin_ia32_cmpless(__b, __a),
+  return (__m128)__builtin_shufflevector((__v4sf)__a,
+                                         (__v4sf)__builtin_ia32_cmpless((__v4sf)__b, (__v4sf)__a),
                                          4, 1, 2, 3);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] to determine if the values in the first
+///    operand are greater than or equal to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLEPS / CMPLEPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpge_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpleps(__b, __a);
+  return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands for inequality and returns the result of the comparison in the
+///    low-order bits of a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNEQSS / CMPNEQSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpneq_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpneqss(__a, __b);
+  return (__m128)__builtin_ia32_cmpneqss((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] for inequality.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNEQPS / CMPNEQPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpneq_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpneqps(__a, __b);
+  return (__m128)__builtin_ia32_cmpneqps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the value in the first operand is not less than
+///    the corresponding value in the second operand and returns the result of
+///    the comparison in the low-order bits of a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLTSS / CMPNLTSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpnlt_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpnltss(__a, __b);
+  return (__m128)__builtin_ia32_cmpnltss((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] to determine if the values in the first
+///    operand are not less than those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLTPS / CMPNLTPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpnlt_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpnltps(__a, __b);
+  return (__m128)__builtin_ia32_cmpnltps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the value in the first operand is not less than
+///    or equal to the corresponding value in the second operand and returns
+///    the result of the comparison in the low-order bits of a vector of
+///    [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLESS / CMPNLESS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpnle_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpnless(__a, __b);
+  return (__m128)__builtin_ia32_cmpnless((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] to determine if the values in the first
+///    operand are not less than or equal to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLEPS / CMPNLEPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpnle_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpnleps(__a, __b);
+  return (__m128)__builtin_ia32_cmpnleps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the value in the first operand is not greater
+///    than the corresponding value in the second operand and returns the
+///    result of the comparison in the low-order bits of a vector of
+///    [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLTSS / CMPNLTSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpngt_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_shufflevector(__a,
-                                         __builtin_ia32_cmpnltss(__b, __a),
+  return (__m128)__builtin_shufflevector((__v4sf)__a,
+                                         (__v4sf)__builtin_ia32_cmpnltss((__v4sf)__b, (__v4sf)__a),
                                          4, 1, 2, 3);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] to determine if the values in the first
+///    operand are not greater than those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLTPS / CMPNLTPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpngt_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpnltps(__b, __a);
+  return (__m128)__builtin_ia32_cmpnltps((__v4sf)__b, (__v4sf)__a);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the value in the first operand is not greater
+///    than or equal to the corresponding value in the second operand and
+///    returns the result of the comparison in the low-order bits of a vector
+///    of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLESS / CMPNLESS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpnge_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_shufflevector(__a,
-                                         __builtin_ia32_cmpnless(__b, __a),
+  return (__m128)__builtin_shufflevector((__v4sf)__a,
+                                         (__v4sf)__builtin_ia32_cmpnless((__v4sf)__b, (__v4sf)__a),
                                          4, 1, 2, 3);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] to determine if the values in the first
+///    operand are not greater than or equal to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLEPS / CMPNLEPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpnge_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpnleps(__b, __a);
+  return (__m128)__builtin_ia32_cmpnleps((__v4sf)__b, (__v4sf)__a);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the value in the first operand is ordered with
+///    respect to the corresponding value in the second operand and returns the
+///    result of the comparison in the low-order bits of a vector of
+///    [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPORDSS / CMPORDSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpord_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpordss(__a, __b);
+  return (__m128)__builtin_ia32_cmpordss((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] to determine if the values in the first
+///    operand are ordered with respect to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPORDPS / CMPORDPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpord_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpordps(__a, __b);
+  return (__m128)__builtin_ia32_cmpordps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the value in the first operand is unordered
+///    with respect to the corresponding value in the second operand and
+///    returns the result of the comparison in the low-order bits of a vector
+///    of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPUNORDSS / CMPUNORDSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the operands. The lower
+///    32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+///    in the low-order bits.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpunord_ss(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpunordss(__a, __b);
+  return (__m128)__builtin_ia32_cmpunordss((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares each of the corresponding 32-bit float values of the
+///    128-bit vectors of [4 x float] to determine if the values in the first
+///    operand are unordered with respect to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPUNORDPS / CMPUNORDPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cmpunord_ps(__m128 __a, __m128 __b)
 {
-  return (__m128)__builtin_ia32_cmpunordps(__a, __b);
+  return (__m128)__builtin_ia32_cmpunordps((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands for equality and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comieq_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_comieq(__a, __b);
+  return __builtin_ia32_comieq((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the first operand is less than the second
+///    operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comilt_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_comilt(__a, __b);
+  return __builtin_ia32_comilt((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the first operand is less than or equal to the
+///    second operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comile_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_comile(__a, __b);
+  return __builtin_ia32_comile((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the first operand is greater than the second
+///    operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comigt_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_comigt(__a, __b);
+  return __builtin_ia32_comigt((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the first operand is greater than or equal to
+///    the second operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comige_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_comige(__a, __b);
+  return __builtin_ia32_comige((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Compares two 32-bit float values in the low-order bits of both
+///    operands to determine if the first operand is not equal to the second
+///    operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_comineq_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_comineq(__a, __b);
+  return __builtin_ia32_comineq((__v4sf)__a, (__v4sf)__b);
 }
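/* Editor-added illustrative sketch (not part of this header): unlike the
 * _mm_cmp*_ss family, the comi/ucomi helpers return a plain int (0 or 1), so
 * they can drive ordinary scalar control flow on the low lanes. */
static int __example_low_lanes_equal(__m128 __a, __m128 __b) {
  return _mm_comieq_ss(__a, __b);   /* 1 if the low 32-bit floats compare equal */
}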
 
+/// \brief Performs an unordered comparison of two 32-bit float values using
+///    the low-order bits of both operands to determine equality and returns
+///    the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomieq_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_ucomieq(__a, __b);
+  return __builtin_ia32_ucomieq((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Performs an unordered comparison of two 32-bit float values using
+///    the low-order bits of both operands to determine if the first operand is
+///    less than the second operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomilt_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_ucomilt(__a, __b);
+  return __builtin_ia32_ucomilt((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Performs an unordered comparison of two 32-bit float values using
+///    the low-order bits of both operands to determine if the first operand
+///    is less than or equal to the second operand and returns the result of
+///    the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomile_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_ucomile(__a, __b);
+  return __builtin_ia32_ucomile((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Performs an unordered comparison of two 32-bit float values using
+///    the low-order bits of both operands to determine if the first operand
+///    is greater than the second operand and returns the result of the
+///    comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomigt_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_ucomigt(__a, __b);
+  return __builtin_ia32_ucomigt((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Performs an unordered comparison of two 32-bit float values using
+///    the low-order bits of both operands to determine if the first operand is
+///    greater than or equal to the second operand and returns the result of
+///    the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomige_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_ucomige(__a, __b);
+  return __builtin_ia32_ucomige((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Performs an unordered comparison of two 32-bit float values using
+///    the low-order bits of both operands to determine inequality and returns
+///    the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \param __b
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the comparison.
+/// \returns An integer containing the comparison results.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_ucomineq_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_ia32_ucomineq(__a, __b);
+  return __builtin_ia32_ucomineq((__v4sf)__a, (__v4sf)__b);
 }
 
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+///    [4 x float] into a 32-bit integer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSS2SI / CVTSS2SI instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the conversion.
+/// \returns A 32-bit integer containing the converted value.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_cvtss_si32(__m128 __a)
 {
-  return __builtin_ia32_cvtss2si(__a);
+  return __builtin_ia32_cvtss2si((__v4sf)__a);
 }
 
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+///    [4 x float] into a 32-bit integer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSS2SI / CVTSS2SI instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the conversion.
+/// \returns A 32-bit integer containing the converted value.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_cvt_ss2si(__m128 __a)
 {
@@ -416,56 +1284,161 @@
 
 #ifdef __x86_64__
 
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+///    [4 x float] into a 64-bit integer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSS2SI / CVTSS2SI instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the conversion.
+/// \returns A 64-bit integer containing the converted value.
 static __inline__ long long __DEFAULT_FN_ATTRS
 _mm_cvtss_si64(__m128 __a)
 {
-  return __builtin_ia32_cvtss2si64(__a);
+  return __builtin_ia32_cvtss2si64((__v4sf)__a);
 }
 
 #endif
 
+/// \brief Converts two low-order float values in a 128-bit vector of
+///    [4 x float] into a 64-bit vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPS2PI instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 64-bit integer vector containing the converted values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cvtps_pi32(__m128 __a)
 {
-  return (__m64)__builtin_ia32_cvtps2pi(__a);
+  return (__m64)__builtin_ia32_cvtps2pi((__v4sf)__a);
 }
 
+/// \brief Converts two low-order float values in a 128-bit vector of
+///    [4 x float] into a 64-bit vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPS2PI instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 64-bit integer vector containing the converted values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cvt_ps2pi(__m128 __a)
 {
   return _mm_cvtps_pi32(__a);
 }
 
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+///    [4 x float] into a 32-bit integer, truncating the result when it is
+///    inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTTSS2SI / CVTTSS2SI instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the conversion.
+/// \returns A 32-bit integer containing the converted value.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_cvttss_si32(__m128 __a)
 {
   return __a[0];
 }
 
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+///    [4 x float] into a 32-bit integer, truncating the result when it is
+///    inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTTSS2SI / CVTTSS2SI instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the conversion.
+/// \returns A 32-bit integer containing the converted value.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_cvtt_ss2si(__m128 __a)
 {
   return _mm_cvttss_si32(__a);
 }
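/* Editor-added illustrative sketch (not part of this header): _mm_cvtss_si32
 * rounds according to the current MXCSR rounding mode, while _mm_cvttss_si32
 * truncates toward zero, so the two can differ for fractional inputs. */
static int __example_round_vs_trunc(float __x) {
  __m128 __v = _mm_set_ss(__x);
  return _mm_cvtss_si32(__v) - _mm_cvttss_si32(__v);   /* e.g. nonzero for 2.7f */
}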
 
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+///    [4 x float] into a 64-bit integer, truncating the result when it is
+///    inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTTSS2SI / CVTTSS2SI instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the conversion.
+/// \returns A 64-bit integer containing the converted value.
 static __inline__ long long __DEFAULT_FN_ATTRS
 _mm_cvttss_si64(__m128 __a)
 {
   return __a[0];
 }
 
+/// \brief Converts two low-order float values in a 128-bit vector of
+///    [4 x float] into a 64-bit vector of [2 x i32], truncating the result
+///    when it is inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTTPS2PI instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 64-bit integer vector containing the converted values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cvttps_pi32(__m128 __a)
 {
-  return (__m64)__builtin_ia32_cvttps2pi(__a);
+  return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a);
 }
 
+/// \brief Converts two low-order float values in a 128-bit vector of [4 x
+///    float] into a 64-bit vector of [2 x i32], truncating the result when it
+///    is inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTTPS2PI instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \returns A 64-bit integer vector containing the converted values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cvtt_ps2pi(__m128 __a)
 {
   return _mm_cvttps_pi32(__a);
 }
 
+/// \brief Converts a 32-bit signed integer value into a floating point value
+///    and writes it to the lower 32 bits of the destination. The remaining
+///    higher order elements of the destination vector are copied from the
+///    corresponding elements in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSI2SS / CVTSI2SS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 32-bit signed integer operand containing the value to be converted.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+///    converted value of the second operand. The upper 96 bits are copied from
+///    the upper 96 bits of the first operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvtsi32_ss(__m128 __a, int __b)
 {
@@ -473,6 +1446,22 @@
   return __a;
 }
 
+/// \brief Converts a 32-bit signed integer value into a floating point value
+///    and writes it to the lower 32 bits of the destination. The remaining
+///    higher order elements of the destination are copied from the
+///    corresponding elements in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSI2SS / CVTSI2SS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 32-bit signed integer operand containing the value to be converted.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+///    converted value of the second operand. The upper 96 bits are copied from
+///    the upper 96 bits of the first operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvt_si2ss(__m128 __a, int __b)
 {
@@ -481,6 +1470,22 @@
 
 #ifdef __x86_64__
 
+/// \brief Converts a 64-bit signed integer value into a floating point value
+///    and writes it to the lower 32 bits of the destination. The remaining
+///    higher order elements of the destination are copied from the
+///    corresponding elements in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSI2SS / CVTSI2SS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 64-bit signed integer operand containing the value to be converted.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+///    converted value of the second operand. The upper 96 bits are copied from
+///    the upper 96 bits of the first operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvtsi64_ss(__m128 __a, long long __b)
 {
@@ -490,24 +1495,84 @@
 
 #endif
 
+/// \brief Converts two elements of a 64-bit vector of [2 x i32] into two
+///    floating point values and writes them to the lower 64-bits of the
+///    destination. The remaining higher order elements of the destination are
+///    copied from the corresponding elements in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 64-bit vector of [2 x i32]. The elements in this vector are converted
+///    and written to the corresponding low-order elements in the destination.
+/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
+///    converted value of the second operand. The upper 64 bits are copied from
+///    the upper 64 bits of the first operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvtpi32_ps(__m128 __a, __m64 __b)
 {
-  return __builtin_ia32_cvtpi2ps(__a, (__v2si)__b);
+  return __builtin_ia32_cvtpi2ps((__v4sf)__a, (__v2si)__b);
 }
 
+/// \brief Converts two elements of a 64-bit vector of [2 x i32] into two
+///    floating point values and writes them to the lower 64-bits of the
+///    destination. The remaining higher order elements of the destination are
+///    copied from the corresponding elements in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+/// \param __b
+///    A 64-bit vector of [2 x i32]. The elements in this vector are converted
+///    and written to the corresponding low-order elements in the destination.
+/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
+///    converted value from the second operand. The upper 64 bits are copied
+///    from the upper 64 bits of the first operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvt_pi2ps(__m128 __a, __m64 __b)
 {
   return _mm_cvtpi32_ps(__a, __b);
 }
 
+/// \brief Extracts a float value contained in the lower 32 bits of a vector of
+///    [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+///    used in the extraction.
+/// \returns A 32-bit float containing the extracted value.
 static __inline__ float __DEFAULT_FN_ATTRS
 _mm_cvtss_f32(__m128 __a)
 {
   return __a[0];
 }
 
+/// \brief Loads two packed float values from the address __p into the
+///     high-order bits of a 128-bit vector of [4 x float]. The low-order bits
+///     are copied from the low-order bits of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVHPD / MOVHPD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. Bits [63:0] are written to bits [63:0]
+///    of the destination.
+/// \param __p
+///    A pointer to two packed float values. Bits [63:0] are written to bits
+///    [127:64] of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the moved values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_loadh_pi(__m128 __a, const __m64 *__p)
 {
@@ -520,6 +1585,21 @@
   return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5);
 }
 
+/// \brief Loads two packed float values from the address __p into the low-order
+///    bits of a 128-bit vector of [4 x float]. The high-order bits are copied
+///    from the high-order bits of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVLPD / MOVLPD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. Bits [127:64] are written to bits
+///    [127:64] of the destination.
+/// \param __p
+///    A pointer to two packed float values. Bits [63:0] are written to bits
+///    [63:0] of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the moved values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_loadl_pi(__m128 __a, const __m64 *__p)
 {
@@ -532,6 +1612,21 @@
   return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3);
 }
 
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+///    32 bits of the vector are initialized with the single-precision
+///    floating-point value loaded from a specified memory location. The upper
+///    96 bits are set to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+///
+/// \param __p
+///    A pointer to a 32-bit memory location containing a single-precision
+///    floating-point value.
+/// \returns An initialized 128-bit floating-point vector of [4 x float]. The
+///    lower 32 bits contain the value loaded from the memory location. The
+///    upper 96 bits are set to zero.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_load_ss(const float *__p)
 {
@@ -542,6 +1637,18 @@
   return (__m128){ __u, 0, 0, 0 };
 }
 
+/// \brief Loads a 32-bit float value and duplicates it to all four vector
+///    elements of a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS + \c shuffling
+///    instruction.
+///
+/// \param __p
+///    A pointer to a float value to be loaded and duplicated.
+/// \returns A 128-bit vector of [4 x float] containing the loaded
+///    and duplicated values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_load1_ps(const float *__p)
 {
@@ -554,12 +1661,34 @@
 
 #define        _mm_load_ps1(p) _mm_load1_ps(p)
 
+/// \brief Loads a 128-bit floating-point vector of [4 x float] from an aligned
+///    memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit memory location. The address of the memory
+///    location has to be 128-bit aligned.
+/// \returns A 128-bit vector of [4 x float] containing the loaded values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_load_ps(const float *__p)
 {
   return *(__m128*)__p;
 }
 
+/// \brief Loads a 128-bit floating-point vector of [4 x float] from an
+///    unaligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVUPS / MOVUPS instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit memory location. The address of the memory
+///    location does not have to be aligned.
+/// \returns A 128-bit vector of [4 x float] containing the loaded values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_loadu_ps(const float *__p)
 {
@@ -569,25 +1698,72 @@
   return ((struct __loadu_ps*)__p)->__v;
 }
 
+/// \brief Loads four packed float values, in reverse order, from an aligned
+///    memory location to 32-bit elements in a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS + \c shuffling
+///    instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit memory location. The address of the memory
+///    location has to be 128-bit aligned.
+/// \returns A 128-bit vector of [4 x float] containing the moved values, loaded
+///    in reverse order.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_loadr_ps(const float *__p)
 {
   __m128 __a = _mm_load_ps(__p);
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);
 }
 
+/// \brief Create a 128-bit vector of [4 x float] with undefined values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \returns A 128-bit vector of [4 x float] containing undefined values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_undefined_ps()
+_mm_undefined_ps(void)
 {
   return (__m128)__builtin_ia32_undef128();
 }
 
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+///    32 bits of the vector are initialized with the specified single-precision
+///    floating-point value. The upper 96 bits are set to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+///
+/// \param __w
+///    A single-precision floating-point value used to initialize the lower 32
+///    bits of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float]. The
+///    lower 32 bits contain the value provided in the source operand. The
+///    upper 96 bits are set to zero.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_set_ss(float __w)
 {
   return (__m128){ __w, 0, 0, 0 };
 }
 
+/// \brief Constructs a 128-bit floating-point vector of [4 x float], with each
+///    of the four single-precision floating-point vector elements set to the
+///    specified single-precision floating-point value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param __w
+///    A single-precision floating-point value used to initialize each vector
+///    element of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float].
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_set1_ps(float __w)
 {
@@ -595,42 +1771,139 @@
 }
 
 /* Microsoft specific. */
+/// \brief Constructs a 128-bit floating-point vector of [4 x float], with each
+///    of the four single-precision floating-point vector elements set to the
+///    specified single-precision floating-point value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param __w
+///    A single-precision floating-point value used to initialize each vector
+///    element of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float].
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_set_ps1(float __w)
 {
     return _mm_set1_ps(__w);
 }
 
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]
+///    initialized with the specified single-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __z
+///    A single-precision floating-point value used to initialize bits [127:96]
+///    of the result.
+/// \param __y
+///    A single-precision floating-point value used to initialize bits [95:64]
+///    of the result.
+/// \param __x
+///    A single-precision floating-point value used to initialize bits [63:32]
+///    of the result.
+/// \param __w
+///    A single-precision floating-point value used to initialize bits [31:0]
+///    of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float].
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_set_ps(float __z, float __y, float __x, float __w)
 {
   return (__m128){ __w, __x, __y, __z };
 }
 
+/// \brief Constructs a 128-bit floating-point vector of [4 x float],
+///    initialized in reverse order with the specified 32-bit single-precision
+///    floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __z
+///    A single-precision floating-point value used to initialize bits [31:0]
+///    of the result.
+/// \param __y
+///    A single-precision floating-point value used to initialize bits [63:32]
+///    of the result.
+/// \param __x
+///    A single-precision floating-point value used to initialize bits [95:64]
+///    of the result.
+/// \param __w
+///    A single-precision floating-point value used to initialize bits [127:96]
+///    of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float].
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_setr_ps(float __z, float __y, float __x, float __w)
 {
   return (__m128){ __z, __y, __x, __w };
 }
 
+/// \brief Constructs a 128-bit floating-point vector of [4 x float] initialized
+///    to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VXORPS / XORPS instruction.
+///
+/// \returns An initialized 128-bit floating-point vector of [4 x float] with
+///    all elements set to zero.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_setzero_ps(void)
 {
   return (__m128){ 0, 0, 0, 0 };
 }
 
+/// \brief Stores the upper 64 bits of a 128-bit vector of [4 x float] to a
+///    memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPEXTRQ / MOVQ instruction.
+///
+/// \param __p
+///    A pointer to a 64-bit memory location.
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the values to be stored.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_storeh_pi(__m64 *__p, __m128 __a)
 {
-  __builtin_ia32_storehps((__v2si *)__p, __a);
+  __builtin_ia32_storehps((__v2si *)__p, (__v4sf)__a);
 }
 
+/// \brief Stores the lower 64 bits of a 128-bit vector of [4 x float] to a
+///     memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVLPS / MOVLPS instruction.
+///
+/// \param __p
+///    A pointer to a memory location that will receive the float values.
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the values to be stored.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_storel_pi(__m64 *__p, __m128 __a)
 {
-  __builtin_ia32_storelps((__v2si *)__p, __a);
+  __builtin_ia32_storelps((__v2si *)__p, (__v4sf)__a);
 }
 
+/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] to a
+///     memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+///
+/// \param __p
+///    A pointer to a 32-bit memory location.
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the value to be stored.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_store_ss(float *__p, __m128 __a)
 {
@@ -640,35 +1913,101 @@
   ((struct __mm_store_ss_struct*)__p)->__u = __a[0];
 }
 
+/// \brief Stores float values from a 128-bit vector of [4 x float] to an
+///    unaligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVUPS / MOVUPS instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit memory location. The address of the memory
+///    location does not have to be aligned.
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the values to be stored.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_storeu_ps(float *__p, __m128 __a)
 {
-  __builtin_ia32_storeups(__p, __a);
+  struct __storeu_ps {
+    __m128 __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_ps*)__p)->__v = __a;
 }
 
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_ps(float *__p, __m128 __a)
-{
-  __a = __builtin_shufflevector(__a, __a, 0, 0, 0, 0);
-  _mm_storeu_ps(__p, __a);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_ps1(float *__p, __m128 __a)
-{
-    return _mm_store1_ps(__p, __a);
-}
-
+/// \brief Stores a 128-bit vector of [4 x float] into an aligned memory
+///    location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit memory location. The address of the memory
+///    location has to be 128-bit aligned.
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the values to be stored.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_store_ps(float *__p, __m128 __a)
 {
-  *(__m128 *)__p = __a;
+  *(__m128*)__p = __a;
 }
 
+/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into
+///    four contiguous elements in an aligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to \c VMOVAPS / MOVAPS + \c shuffling
+///    instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit memory location.
+/// \param __a
+///    A 128-bit vector of [4 x float] whose lower 32 bits are stored to each
+///    of the four contiguous elements pointed to by __p.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store1_ps(float *__p, __m128 __a)
+{
+  __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 0, 0);
+  _mm_store_ps(__p, __a);
+}
+
+/// \brief Stores float values from a 128-bit vector of [4 x float] to an
+///    aligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit memory location. The address of the memory
+///    location has to be 128-bit aligned.
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the values to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_ps1(float *__p, __m128 __a)
+{
+  return _mm_store1_ps(__p, __a);
+}
+
+/// \brief Stores float values from a 128-bit vector of [4 x float] to an
+///    aligned memory location in reverse order.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS + \c shuffling
+///    instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit memory location. The address of the memory
+///    location has to be 128-bit aligned.
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the values to be stored.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_storer_ps(float *__p, __m128 __a)
 {
-  __a = __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+  __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);
   _mm_store_ps(__p, __a);
 }
 
@@ -681,153 +2020,599 @@
 /* FIXME: We have to #define this because "sel" must be a constant integer, and
    Sema doesn't do any form of constant propagation yet. */
 
+/// \brief Loads one cache line of data from the specified address to a location
+///    closer to the processor.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// void _mm_prefetch(const void * a, const int sel);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c PREFETCHNTA instruction.
+///
+/// \param a
+///    A pointer to a memory location containing a cache line of data.
+/// \param sel
+///    A predefined integer constant specifying the type of prefetch operation:
+///    _MM_HINT_NTA: Move data using the non-temporal access (NTA) hint.
+///    The PREFETCHNTA instruction will be generated.
+///    _MM_HINT_T0: Move data using the T0 hint. The PREFETCHT0 instruction will
+///    be generated.
+///    _MM_HINT_T1: Move data using the T1 hint. The PREFETCHT1 instruction will
+///    be generated.
+///    _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will
+///    be generated.
 #define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
 #endif
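+
+/* A minimal usage sketch (illustrative; `buf` stands for any readable
+   address): prefetch the cache line holding `buf` with the T0 hint before a
+   loop that will touch it.
+
+     _mm_prefetch(buf, _MM_HINT_T0);
+*/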
 
+/// \brief Stores a 64-bit integer in the specified aligned memory location. To
+///    minimize caching, the data is flagged as non-temporal (unlikely to be
+///    used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MOVNTQ instruction.
+///
+/// \param __p
+///    A pointer to an aligned memory location used to store the register value.
+/// \param __a
+///    A 64-bit integer containing the value to be stored.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_stream_pi(__m64 *__p, __m64 __a)
 {
   __builtin_ia32_movntq(__p, __a);
 }
 
+/// \brief Moves packed float values from a 128-bit vector of [4 x float] to a
+///    128-bit aligned memory location. To minimize caching, the data is flagged
+///    as non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVNTPS / MOVNTPS instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit aligned memory location that will receive the
+///    float values.
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the values to be moved.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_stream_ps(float *__p, __m128 __a)
 {
-  __builtin_ia32_movntps(__p, __a);
+  __builtin_nontemporal_store((__v4sf)__a, (__v4sf*)__p);
 }
 
+/// \brief Forces strong memory ordering (serialization) between store
+///    instructions preceding this instruction and store instructions following
+///    this instruction, ensuring the system completes all previous stores
+///    before executing subsequent stores.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c SFENCE instruction.
+///
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_sfence(void)
 {
   __builtin_ia32_sfence();
 }
 
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_extract_pi16(__m64 __a, int __n)
-{
-  __v4hi __b = (__v4hi)__a;
-  return (unsigned short)__b[__n & 3];
-}
+/// \brief Extracts a 16-bit element from a 64-bit vector of [4 x i16] and
+///    returns it, as specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPEXTRW / PEXTRW instruction.
+///
+/// \param __a
+///    A 64-bit vector of [4 x i16].
+/// \param __n
+///    An immediate integer operand that determines which bits are extracted:
+///    0: Bits [15:0] are copied to the destination.
+///    1: Bits [31:16] are copied to the destination.
+///    2: Bits [47:32] are copied to the destination.
+///    3: Bits [63:48] are copied to the destination.
+/// \returns A 16-bit integer containing the extracted 16 bits of packed data.
+#define _mm_extract_pi16(a, n) __extension__ ({ \
+  (int)__builtin_ia32_vec_ext_v4hi((__m64)a, (int)n); })
 
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_insert_pi16(__m64 __a, int __d, int __n)
-{
-   __v4hi __b = (__v4hi)__a;
-   __b[__n & 3] = __d;
-   return (__m64)__b;
-}
+/// \brief Copies data from the 64-bit vector of [4 x i16] to the destination,
+///    and inserts the lower 16 bits of an integer operand at the 16-bit offset
+///    specified by the immediate operand __n.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPINSRW / PINSRW instruction.
+///
+/// \param __a
+///    A 64-bit vector of [4 x i16].
+/// \param __d
+///    An integer. The lower 16-bit value from this operand is written to the
+///    destination at the offset specified by operand __n.
+/// \param __n
+///    An immediate integer operand that determines which bits are to be used
+///    in the destination.
+///    0: Bits [15:0] are copied to the destination.
+///    1: Bits [31:16] are copied to the destination.
+///    2: Bits [47:32] are copied to the destination.
+///    3: Bits [63:48] are copied to the destination.
+///    The remaining bits in the destination are copied from the corresponding
+///    bits in operand __a.
+/// \returns A 64-bit integer vector containing the copied packed data from the
+///    operands.
+#define _mm_insert_pi16(a, d, n) __extension__ ({ \
+  (__m64)__builtin_ia32_vec_set_v4hi((__m64)a, (int)d, (int)n); })
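+
+/* A minimal usage sketch (lane values illustrative): read lane 2 of a
+   [4 x i16] vector, then overwrite lane 0 with a new 16-bit value.
+
+     __m64 v  = _mm_set_pi16(40, 30, 20, 10);  // lanes 3..0 = 40, 30, 20, 10
+     int   e2 = _mm_extract_pi16(v, 2);        // 30
+     __m64 w  = _mm_insert_pi16(v, 99, 0);     // lanes 3..0 = 40, 30, 20, 99
+*/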
 
+/// \brief Compares each of the corresponding packed 16-bit integer values of
+///    the 64-bit integer vectors, and writes the greater value to the
+///    corresponding bits in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMAXSW instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the comparison results.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_max_pi16(__m64 __a, __m64 __b)
 {
   return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b);
 }
 
+/// \brief Compares each of the corresponding packed 8-bit unsigned integer
+///    values of the 64-bit integer vectors, and writes the greater value to the
+///    corresponding bits in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMAXUB instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the comparison results.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_max_pu8(__m64 __a, __m64 __b)
 {
   return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b);
 }
 
+/// \brief Compares each of the corresponding packed 16-bit integer values of
+///    the 64-bit integer vectors, and writes the lesser value to the
+///    corresponding bits in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMINSW instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the comparison results.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_min_pi16(__m64 __a, __m64 __b)
 {
   return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b);
 }
 
+/// \brief Compares each of the corresponding packed 8-bit unsigned integer
+///    values of the 64-bit integer vectors, and writes the lesser value to the
+///    corresponding bits in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMINUB instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the comparison results.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_min_pu8(__m64 __a, __m64 __b)
 {
   return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b);
 }
 
+/// \brief Takes the most significant bit from each 8-bit element in a 64-bit
+///    integer vector to create an 8-bit mask value. Zero-extends the value to
+///    a 32-bit integer and writes it to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMOVMSKB instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing the values with bits to be extracted.
+/// \returns The most significant bit from each 8-bit element in the operand,
+///    written to bits [7:0].
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_movemask_pi8(__m64 __a)
 {
   return __builtin_ia32_pmovmskb((__v8qi)__a);
 }
 
+/// \brief Multiplies packed 16-bit unsigned integer values and writes the
+///    high-order 16 bits of each 32-bit product to the corresponding bits in
+///    the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMULHUW instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the products of both operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_mulhi_pu16(__m64 __a, __m64 __b)
 {
   return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b);
 }
 
+/// \brief Shuffles the 4 16-bit integers from a 64-bit integer vector to the
+///    destination, as specified by the immediate value operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSHUFW instruction.
+///
+/// \code
+/// __m64 _mm_shuffle_pi16(__m64 a, const int n);
+/// \endcode
+///
+/// \param a
+///    A 64-bit integer vector containing the values to be shuffled.
+/// \param n
+///    An immediate value containing an 8-bit value specifying which elements to
+///    copy from a. The destinations within the 64-bit destination are assigned
+///    values as follows:
+///    Bits [1:0] are used to assign values to bits [15:0] in the destination.
+///    Bits [3:2] are used to assign values to bits [31:16] in the destination.
+///    Bits [5:4] are used to assign values to bits [47:32] in the destination.
+///    Bits [7:6] are used to assign values to bits [63:48] in the destination.
+///    Bit value assignments:
+///    00: assigned from bits [15:0] of a.
+///    01: assigned from bits [31:16] of a.
+///    10: assigned from bits [47:32] of a.
+///    11: assigned from bits [63:48] of a.
+/// \returns A 64-bit integer vector containing the shuffled values.
 #define _mm_shuffle_pi16(a, n) __extension__ ({ \
   (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)); })
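+
+/* A minimal usage sketch (lane values illustrative): reverse the four 16-bit
+   lanes with the selector _MM_SHUFFLE(0, 1, 2, 3) (== 0x1B).
+
+     __m64 v = _mm_set_pi16(4, 3, 2, 1);                      // lanes 3..0
+     __m64 r = _mm_shuffle_pi16(v, _MM_SHUFFLE(0, 1, 2, 3));  // lanes 3..0 = 1, 2, 3, 4
+*/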
 
+/// \brief Conditionally copies the values from each 8-bit element in the first
+///    64-bit integer vector operand to the specified memory location, as
+///    specified by the most significant bit in the corresponding element in the
+///    second 64-bit integer vector operand. To minimize caching, the data is
+///    flagged as non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MASKMOVQ instruction.
+///
+/// \param __d
+///    A 64-bit integer vector containing the values with elements to be copied.
+/// \param __n
+///    A 64-bit integer vector operand. The most significant bit from each 8-bit
+///    element determines whether the corresponding element in operand __d is
+///    copied. If the most significant bit of a given element is 1, the
+///    corresponding element in operand __d is copied.
+/// \param __p
+///    A pointer to a 64-bit memory location that will receive the conditionally
+///    copied integer values. The address of the memory location does not have
+///    to be aligned.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
 {
   __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p);
 }
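+
+/* A minimal usage sketch (all byte values illustrative): store only the two
+   bytes of `data` whose mask byte has its most significant bit set; the other
+   six bytes of `out` are left untouched.
+
+     char out[8] = {0};
+     __m64 data = _mm_set_pi8(8, 7, 6, 5, 4, 3, 2, 1);   // bytes 7..0
+     __m64 mask = _mm_set_pi8(0, 0, 0, 0, 0, 0, (char)0x80, (char)0x80);
+     _mm_maskmove_si64(data, mask, out);                 // out[0] = 1, out[1] = 2
+*/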
 
+/// \brief Computes the rounded averages of the packed unsigned 8-bit integer
+///    values and writes the averages to the corresponding bits in the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PAVGB instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the averages of both operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_avg_pu8(__m64 __a, __m64 __b)
 {
   return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b);
 }
 
+/// \brief Computes the rounded averages of the packed unsigned 16-bit integer
+///    values and writes the averages to the corresponding bits in the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PAVGW instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the averages of both operands.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_avg_pu16(__m64 __a, __m64 __b)
 {
   return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b);
 }
 
+/// \brief Subtracts the corresponding 8-bit unsigned integer values of the two
+///    64-bit vector operands and computes the absolute value of each
+///    difference. Then the sum of the 8 absolute differences is written to
+///    bits [15:0] of the destination; the remaining bits [63:16] are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSADBW instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector whose lower 16 bits contain the sum of the
+///    absolute differences between both operands. The upper bits are
+///    cleared.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_sad_pu8(__m64 __a, __m64 __b)
 {
   return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
 }
 
+/// \brief Returns the contents of the MXCSR register as a 32-bit unsigned
+///    integer value. There are several groups of macros associated with this
+///    intrinsic, including:
+///    * For checking exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+///      _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
+///      _MM_EXCEPT_INEXACT. There is a convenience wrapper
+///      _MM_GET_EXCEPTION_STATE().
+///    * For checking exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+///      _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
+///      There is a convenience wrapper _MM_GET_EXCEPTION_MASK().
+///    * For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+///      _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
+///      _MM_GET_ROUNDING_MODE(x) where x is one of these macros.
+///    * For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+///      There is a convenience wrapper _MM_GET_FLUSH_ZERO_MODE().
+///    * For checking denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+///      _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
+///      _MM_GET_DENORMALS_ZERO_MODE().
+///
+///    For example, the expression below checks if an overflow exception has
+///    occurred:
+///      ( _mm_getcsr() & _MM_EXCEPT_OVERFLOW )
+///
+///    The following example gets the current rounding mode:
+///      _MM_GET_ROUNDING_MODE()
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSTMXCSR / STMXCSR instruction.
+///
+/// \returns A 32-bit unsigned integer containing the contents of the MXCSR
+///    register.
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _mm_getcsr(void)
 {
   return __builtin_ia32_stmxcsr();
 }
 
+/// \brief Sets the MXCSR register with the 32-bit unsigned integer value. There
+///    are several groups of macros associated with this intrinsic, including:
+///    * For setting exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+///      _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
+///      _MM_EXCEPT_INEXACT. There is a convenience wrapper
+///      _MM_SET_EXCEPTION_STATE(x) where x is one of these macros.
+///    * For setting exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+///      _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
+///      There is a convenience wrapper _MM_SET_EXCEPTION_MASK(x) where x is one
+///      of these macros.
+///    * For setting rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+///      _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
+///      _MM_SET_ROUNDING_MODE(x) where x is one of these macros.
+///    * For setting flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+///      There is a convenience wrapper _MM_SET_FLUSH_ZERO_MODE(x) where x is
+///      one of these macros.
+///    * For setting denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+///      _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
+///      _MM_SET_DENORMALS_ZERO_MODE(x) where x is one of these macros.
+///
+///    For example, the following expression causes subsequent floating-point
+///    operations to round up:
+///      _mm_setcsr(_mm_getcsr() | _MM_ROUND_UP)
+///
+///    The following example sets the DAZ and FTZ flags:
+///      void setFlags() {
+///        _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
+///        _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
+///      }
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VLDMXCSR / LDMXCSR instruction.
+///
+/// \param __i
+///    A 32-bit unsigned integer value to be written to the MXCSR register.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_setcsr(unsigned int __i)
 {
   __builtin_ia32_ldmxcsr(__i);
 }
 
+/// \brief Selects 4 float values from the 128-bit operands of [4 x float], as
+///    specified by the immediate value operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm_shuffle_ps(__m128 a, __m128 b, const int mask);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHUFPS / SHUFPS instruction.
+///
+/// \param a
+///    A 128-bit vector of [4 x float].
+/// \param b
+///    A 128-bit vector of [4 x float].
+/// \param mask
+///    An immediate value containing an 8-bit value specifying which elements to
+///    copy from a and b.
+///    Bits [3:0] specify the values copied from operand a.
+///    Bits [7:4] specify the values copied from operand b. The destinations
+///    within the 128-bit destination are assigned values as follows:
+///    Bits [1:0] are used to assign values to bits [31:0] in the destination.
+///    Bits [3:2] are used to assign values to bits [63:32] in the destination.
+///    Bits [5:4] are used to assign values to bits [95:64] in the destination.
+///    Bits [7:6] are used to assign values to bits [127:96] in the destination.
+///    Bit value assignments:
+///    00: Bits [31:0] copied from the specified operand.
+///    01: Bits [63:32] copied from the specified operand.
+///    10: Bits [95:64] copied from the specified operand.
+///    11: Bits [127:96] copied from the specified operand.
+/// \returns A 128-bit vector of [4 x float] containing the shuffled values.
 #define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
   (__m128)__builtin_shufflevector((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
-                                  (mask) & 0x3, ((mask) & 0xc) >> 2, \
-                                  (((mask) & 0x30) >> 4) + 4, \
-                                  (((mask) & 0xc0) >> 6) + 4); })
+                                  0 + (((mask) >> 0) & 0x3), \
+                                  0 + (((mask) >> 2) & 0x3), \
+                                  4 + (((mask) >> 4) & 0x3), \
+                                  4 + (((mask) >> 6) & 0x3)); })
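+
+/* A minimal usage sketch (element values illustrative): bits [3:0] of the
+   mask select two elements of a, bits [7:4] select two elements of b.
+
+     __m128 a = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);   // elements 3..0
+     __m128 b = _mm_set_ps(7.0f, 6.0f, 5.0f, 4.0f);
+     __m128 r = _mm_shuffle_ps(a, b, _MM_SHUFFLE(1, 0, 3, 2));
+     // r = { a[2], a[3], b[0], b[1] } = { 2.0f, 3.0f, 4.0f, 5.0f }
+*/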
 
+/// \brief Unpacks the high-order (index 2,3) values from two 128-bit vectors of
+///    [4 x float] and interleaves them into a 128-bit vector of [4 x
+///    float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUNPCKHPS / UNPCKHPS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+///    Bits [95:64] are written to bits [31:0] of the destination.
+///    Bits [127:96] are written to bits [95:64] of the destination.
+/// \param __b
+///    A 128-bit vector of [4 x float].
+///    Bits [95:64] are written to bits [63:32] of the destination.
+///    Bits [127:96] are written to bits [127:96] of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the interleaved values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_unpackhi_ps(__m128 __a, __m128 __b)
 {
-  return __builtin_shufflevector(__a, __b, 2, 6, 3, 7);
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 2, 6, 3, 7);
 }
 
+/// \brief Unpacks the low-order (index 0,1) values from two 128-bit vectors of
+///    [4 x float] and interleaves them into a 128-bit vector of [4 x
+///    float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUNPCKLPS / UNPCKLPS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float].
+///    Bits [31:0] are written to bits [31:0] of the destination.
+///    Bits [63:32] are written to bits [95:64] of the destination.
+/// \param __b
+///    A 128-bit vector of [4 x float].
+///    Bits [31:0] are written to bits [63:32] of the destination.
+///    Bits [63:32] are written to bits [127:96] of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the interleaved values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_unpacklo_ps(__m128 __a, __m128 __b)
 {
-  return __builtin_shufflevector(__a, __b, 0, 4, 1, 5);
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 4, 1, 5);
 }
 
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+///    32 bits are set to the lower 32 bits of the second parameter. The upper
+///    96 bits are set to the upper 96 bits of the first parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+///
+/// \param __a
+///    A 128-bit floating-point vector of [4 x float]. The upper 96 bits are
+///    written to the upper 96 bits of the result.
+/// \param __b
+///    A 128-bit floating-point vector of [4 x float]. The lower 32 bits are
+///    written to the lower 32 bits of the result.
+/// \returns A 128-bit floating-point vector of [4 x float].
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_move_ss(__m128 __a, __m128 __b)
 {
-  return __builtin_shufflevector(__a, __b, 4, 1, 2, 3);
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 4, 1, 2, 3);
 }
 
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+///    64 bits are set to the upper 64 bits of the second parameter. The upper
+///    64 bits are set to the upper 64 bits of the first parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUNPCKHPD / UNPCKHPD instruction.
+///
+/// \param __a
+///    A 128-bit floating-point vector of [4 x float]. The upper 64 bits are
+///    written to the upper 64 bits of the result.
+/// \param __b
+///    A 128-bit floating-point vector of [4 x float]. The upper 64 bits are
+///    written to the lower 64 bits of the result.
+/// \returns A 128-bit floating-point vector of [4 x float].
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_movehl_ps(__m128 __a, __m128 __b)
 {
-  return __builtin_shufflevector(__a, __b, 6, 7, 2, 3);
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 6, 7, 2, 3);
 }
 
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+///    64 bits are set to the lower 64 bits of the first parameter. The upper
+///    64 bits are set to the lower 64 bits of the second parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUNPCKLPD / UNPCKLPD instruction.
+///
+/// \param __a
+///    A 128-bit floating-point vector of [4 x float]. The lower 64 bits are
+///    written to the lower 64 bits of the result.
+/// \param __b
+///    A 128-bit floating-point vector of [4 x float]. The lower 64 bits are
+///    written to the upper 64 bits of the result.
+/// \returns A 128-bit floating-point vector of [4 x float].
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_movelh_ps(__m128 __a, __m128 __b)
 {
-  return __builtin_shufflevector(__a, __b, 0, 1, 4, 5);
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 1, 4, 5);
 }
 
+/// \brief Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x
+///    float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+///
+/// \param __a
+///    A 64-bit vector of [4 x i16]. The elements of the destination are copied
+///    from the corresponding elements in this operand.
+/// \returns A 128-bit vector of [4 x float] containing the copied and converted
+///    values from the operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvtpi16_ps(__m64 __a)
 {
@@ -846,6 +2631,18 @@
   return __r;
 }
 
+/// \brief Converts a 64-bit vector of 16-bit unsigned integer values into a
+///    128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+///
+/// \param __a
+///    A 64-bit vector of 16-bit unsigned integer values. The elements of the
+///    destination are copied from the corresponding elements in this operand.
+/// \returns A 128-bit vector of [4 x float] containing the copied and converted
+///    values from the operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvtpu16_ps(__m64 __a)
 {
@@ -863,6 +2660,18 @@
   return __r;
 }
 
+/// \brief Converts the lower four 8-bit values from a 64-bit vector of [8 x i8]
+///    into a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+///
+/// \param __a
+///    A 64-bit vector of [8 x i8]. The elements of the destination are copied
+///    from the corresponding lower 4 elements in this operand.
+/// \returns A 128-bit vector of [4 x float] containing the copied and converted
+///    values from the operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvtpi8_ps(__m64 __a)
 {
@@ -875,6 +2684,19 @@
   return _mm_cvtpi16_ps(__b);
 }
 
+/// \brief Converts the lower four unsigned 8-bit integer values from a 64-bit
+///    vector of [8 x u8] into a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+///
+/// \param __a
+///    A 64-bit vector of unsigned 8-bit integer values. The elements of the
+///    destination are copied from the corresponding lower 4 elements in this
+///    operand.
+/// \returns A 128-bit vector of [4 x float] containing the copied and converted
+///    values from the source operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvtpu8_ps(__m64 __a)
 {
@@ -886,6 +2708,22 @@
   return _mm_cvtpi16_ps(__b);
 }
 
+/// \brief Converts the two 32-bit signed integer values from each 64-bit vector
+///    operand of [2 x i32] into a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+///
+/// \param __a
+///    A 64-bit vector of [2 x i32]. The lower elements of the destination are
+///    copied from the elements in this operand.
+/// \param __b
+///    A 64-bit vector of [2 x i32]. The upper elements of the destination are
+///    copied from the elements in this operand.
+/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
+///    copied and converted values from the first operand. The upper 64 bits
+///    contain the copied and converted values from the second operand.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
 {
@@ -898,6 +2736,22 @@
   return _mm_cvtpi32_ps(__c, __a);
 }
 
+/// \brief Converts each single-precision floating-point element of a 128-bit
+///    floating-point vector of [4 x float] into a 16-bit signed integer, and
+///    packs the results into a 64-bit integer vector of [4 x i16]. If the
+///    floating-point element is NaN or infinity, or if the floating-point
+///    element is greater than 0x7FFFFFFF or less than -0x8000, it is converted
+///    to 0x8000. Otherwise if the floating-point element is greater
+///    than 0x7FFF, it is converted to 0x7FFF.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPS2PI + \c COMPOSITE instruction.
+///
+/// \param __a
+///    A 128-bit floating-point vector of [4 x float].
+/// \returns A 64-bit integer vector of [4 x i16] containing the converted
+///    values.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cvtps_pi16(__m128 __a)
 {
@@ -910,6 +2764,23 @@
   return _mm_packs_pi32(__b, __c);
 }
 
+/// \brief Converts each single-precision floating-point element of a 128-bit
+///    floating-point vector of [4 x float] into an 8-bit signed integer, and
+///    packs the results into the lower 32 bits of a 64-bit integer vector of
+///    [8 x i8]. The upper 32 bits of the vector are set to 0. If the
+///    floating-point element is NaN or infinity, or if the floating-point
+///    element is greater than 0x7FFFFFFF or less than -0x80, it is converted
+///    to 0x80. Otherwise if the floating-point element is greater
+///    than 0x7F, it is converted to 0x7F.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPS2PI + \c COMPOSITE instruction.
+///
+/// \param __a
+///    A 128-bit floating-point vector of [4 x float].
+/// \returns A 64-bit integer vector of [8 x i8]. The lower 32 bits contain the
+///    converted values and the upper 32 bits are set to zero.
 static __inline__ __m64 __DEFAULT_FN_ATTRS
 _mm_cvtps_pi8(__m128 __a)
 {
@@ -921,16 +2792,28 @@
   return _mm_packs_pi16(__b, __c);
 }
 
+/// \brief Extracts the sign bits from each single-precision floating-point
+///    element of a 128-bit floating-point vector of [4 x float] and returns the
+///    sign bits in bits [3:0] of the result. Bits [31:4] of the result are set
+///    to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVMSKPS / MOVMSKPS instruction.
+///
+/// \param __a
+///    A 128-bit floating-point vector of [4 x float].
+/// \returns A 32-bit integer value. Bits [3:0] contain the sign bits from each
+///    single-precision floating-point element of the parameter. Bits [31:4] are
+///    set to zero.
 static __inline__ int __DEFAULT_FN_ATTRS
 _mm_movemask_ps(__m128 __a)
 {
-  return __builtin_ia32_movmskps(__a);
+  return __builtin_ia32_movmskps((__v4sf)__a);
 }
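+
+/* A minimal usage sketch (x and y stand for any __m128 operands): test
+   whether any comparison lane matched by checking the 4-bit sign mask.
+
+     __m128 cmp = _mm_cmplt_ps(x, y);        // all-ones in each matching lane
+     if (_mm_movemask_ps(cmp) != 0) {
+       // at least one element of x is less than its counterpart in y
+     }
+*/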
 
 
-#ifdef _MSC_VER
-#define _MM_ALIGN16 __declspec(align(16))
-#endif
+#define _MM_ALIGN16 __attribute__((aligned(16)))
 
 #define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
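+
+/* _MM_SHUFFLE packs four 2-bit element selectors into one immediate, highest
+   element first; for example, _MM_SHUFFLE(3, 2, 1, 0) == 0xE4 selects every
+   element from its original position. */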
 
@@ -1003,7 +2886,7 @@
 #undef __DEFAULT_FN_ATTRS
 
 /* Ugly hack for backwards-compatibility (compatible with gcc) */
-#if defined(__SSE2__) && !__has_feature(modules)
+#if defined(__SSE2__) && !__building_module(_Builtin_intrinsics)
 #include <emmintrin.h>
 #endif
 
diff --git a/renderscript/clang-include/xopintrin.h b/renderscript/clang-include/xopintrin.h
index f07f51c..bdf0cec 100644
--- a/renderscript/clang-include/xopintrin.h
+++ b/renderscript/clang-include/xopintrin.h
@@ -198,13 +198,13 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)
 {
-  return (__m128i)__builtin_ia32_vpcmov(__A, __B, __C);
+  return (__m128i)__builtin_ia32_vpcmov((__v2di)__A, (__v2di)__B, (__v2di)__C);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS
 _mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)
 {
-  return (__m256i)__builtin_ia32_vpcmov_256(__A, __B, __C);
+  return (__m256i)__builtin_ia32_vpcmov_256((__v4di)__A, (__v4di)__B, (__v4di)__C);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
diff --git a/renderscript/include/rs_math.rsh b/renderscript/include/rs_math.rsh
index 849b52e..3d034d0 100644
--- a/renderscript/include/rs_math.rsh
+++ b/renderscript/include/rs_math.rsh
@@ -58,22 +58,14 @@
  *
  * The inverse of pi, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_1_PI;
-#else
 #define M_1_PI 0.318309886183790671537767526745028724f
-#endif
 
 /*
  * M_2_PI: 2 / pi, as a 32 bit float
  *
  * 2 divided by pi, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_2_PI;
-#else
 #define M_2_PI 0.636619772367581343075535053490057448f
-#endif
 
 /*
  * M_2_PIl: 2 / pi, as a 32 bit float
@@ -82,132 +74,84 @@
  *
  * 2 divided by pi, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_2_PIl;
-#else
 #define M_2_PIl 0.636619772367581343075535053490057448f
-#endif
 
 /*
  * M_2_SQRTPI: 2 / sqrt(pi), as a 32 bit float
  *
  * 2 divided by the square root of pi, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_2_SQRTPI;
-#else
 #define M_2_SQRTPI 1.128379167095512573896158903121545172f
-#endif
 
 /*
  * M_E: e, as a 32 bit float
  *
  * The number e, the base of the natural logarithm, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_E;
-#else
 #define M_E 2.718281828459045235360287471352662498f
-#endif
 
 /*
  * M_LN10: log_e(10), as a 32 bit float
  *
  * The natural logarithm of 10, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_LN10;
-#else
 #define M_LN10 2.302585092994045684017991454684364208f
-#endif
 
 /*
  * M_LN2: log_e(2), as a 32 bit float
  *
  * The natural logarithm of 2, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_LN2;
-#else
 #define M_LN2 0.693147180559945309417232121458176568f
-#endif
 
 /*
  * M_LOG10E: log_10(e), as a 32 bit float
  *
  * The logarithm base 10 of e, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_LOG10E;
-#else
 #define M_LOG10E 0.434294481903251827651128918916605082f
-#endif
 
 /*
  * M_LOG2E: log_2(e), as a 32 bit float
  *
  * The logarithm base 2 of e, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_LOG2E;
-#else
 #define M_LOG2E 1.442695040888963407359924681001892137f
-#endif
 
 /*
  * M_PI: pi, as a 32 bit float
  *
  * The constant pi, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_PI;
-#else
 #define M_PI 3.141592653589793238462643383279502884f
-#endif
 
 /*
  * M_PI_2: pi / 2, as a 32 bit float
  *
  * Pi divided by 2, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_PI_2;
-#else
 #define M_PI_2 1.570796326794896619231321691639751442f
-#endif
 
 /*
  * M_PI_4: pi / 4, as a 32 bit float
  *
  * Pi divided by 4, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_PI_4;
-#else
 #define M_PI_4 0.785398163397448309615660845819875721f
-#endif
 
 /*
  * M_SQRT1_2: 1 / sqrt(2), as a 32 bit float
  *
  * The inverse of the square root of 2, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_SQRT1_2;
-#else
 #define M_SQRT1_2 0.707106781186547524400844362104849039f
-#endif
 
 /*
  * M_SQRT2: sqrt(2), as a 32 bit float
  *
  * The square root of 2, as a 32 bit float.
  */
-#if (defined(RS_VERSION) && (RS_VERSION >= UNRELEASED))
-extern const float M_SQRT2;
-#else
 #define M_SQRT2 1.414213562373095048801688724209698079f
-#endif
 
 /*
  * abs: Absolute value of an integer
diff --git a/renderscript/lib/arm/libRSSupport.so b/renderscript/lib/arm/libRSSupport.so
index 0548eb6..4cd7040 100755
--- a/renderscript/lib/arm/libRSSupport.so
+++ b/renderscript/lib/arm/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/arm/libRSSupportIO.so b/renderscript/lib/arm/libRSSupportIO.so
index dd30e36..3359c58 100755
--- a/renderscript/lib/arm/libRSSupportIO.so
+++ b/renderscript/lib/arm/libRSSupportIO.so
Binary files differ
diff --git a/renderscript/lib/arm/libblasV8.so b/renderscript/lib/arm/libblasV8.so
index 53545a8..60b43b4 100644
--- a/renderscript/lib/arm/libblasV8.so
+++ b/renderscript/lib/arm/libblasV8.so
Binary files differ
diff --git a/renderscript/lib/arm/libc.so b/renderscript/lib/arm/libc.so
index 409713c..dc66bc0 100755
--- a/renderscript/lib/arm/libc.so
+++ b/renderscript/lib/arm/libc.so
Binary files differ
diff --git a/renderscript/lib/arm/libclcore.bc b/renderscript/lib/arm/libclcore.bc
index c4000db..ee0429a 100644
--- a/renderscript/lib/arm/libclcore.bc
+++ b/renderscript/lib/arm/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/arm/libcompiler_rt.a b/renderscript/lib/arm/libcompiler_rt.a
index 304efe2..b34472d 100644
--- a/renderscript/lib/arm/libcompiler_rt.a
+++ b/renderscript/lib/arm/libcompiler_rt.a
Binary files differ
diff --git a/renderscript/lib/arm/libm.so b/renderscript/lib/arm/libm.so
index f61012f..8f3733d 100755
--- a/renderscript/lib/arm/libm.so
+++ b/renderscript/lib/arm/libm.so
Binary files differ
diff --git a/renderscript/lib/arm/librsjni.so b/renderscript/lib/arm/librsjni.so
index e65b58a..4878227 100755
--- a/renderscript/lib/arm/librsjni.so
+++ b/renderscript/lib/arm/librsjni.so
Binary files differ
diff --git a/renderscript/lib/arm/librsrt_arm.bc b/renderscript/lib/arm/librsrt_arm.bc
index c4000db..ee0429a 100644
--- a/renderscript/lib/arm/librsrt_arm.bc
+++ b/renderscript/lib/arm/librsrt_arm.bc
Binary files differ
diff --git a/renderscript/lib/arm64/libRSSupport.so b/renderscript/lib/arm64/libRSSupport.so
index c38bb46..e2933aa 100755
--- a/renderscript/lib/arm64/libRSSupport.so
+++ b/renderscript/lib/arm64/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/arm64/libRSSupportIO.so b/renderscript/lib/arm64/libRSSupportIO.so
index 140c8df..3ee7aa0 100755
--- a/renderscript/lib/arm64/libRSSupportIO.so
+++ b/renderscript/lib/arm64/libRSSupportIO.so
Binary files differ
diff --git a/renderscript/lib/arm64/libblasV8.so b/renderscript/lib/arm64/libblasV8.so
index 4f836b0..299f9d7 100644
--- a/renderscript/lib/arm64/libblasV8.so
+++ b/renderscript/lib/arm64/libblasV8.so
Binary files differ
diff --git a/renderscript/lib/arm64/libc.so b/renderscript/lib/arm64/libc.so
index 52d3257..d0342f8 100755
--- a/renderscript/lib/arm64/libc.so
+++ b/renderscript/lib/arm64/libc.so
Binary files differ
diff --git a/renderscript/lib/arm64/libclcore.bc b/renderscript/lib/arm64/libclcore.bc
index e54a506..0464fd6 100644
--- a/renderscript/lib/arm64/libclcore.bc
+++ b/renderscript/lib/arm64/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/arm64/libcompiler_rt.a b/renderscript/lib/arm64/libcompiler_rt.a
index 2103acd..ed45201 100644
--- a/renderscript/lib/arm64/libcompiler_rt.a
+++ b/renderscript/lib/arm64/libcompiler_rt.a
Binary files differ
diff --git a/renderscript/lib/arm64/libm.so b/renderscript/lib/arm64/libm.so
index a176294..9dd4cf5 100755
--- a/renderscript/lib/arm64/libm.so
+++ b/renderscript/lib/arm64/libm.so
Binary files differ
diff --git a/renderscript/lib/arm64/librsjni.so b/renderscript/lib/arm64/librsjni.so
index 682060a..d8284d3 100755
--- a/renderscript/lib/arm64/librsjni.so
+++ b/renderscript/lib/arm64/librsjni.so
Binary files differ
diff --git a/renderscript/lib/arm64/librsrt_arm64.bc b/renderscript/lib/arm64/librsrt_arm64.bc
index 8c002e2..0a1d282 100644
--- a/renderscript/lib/arm64/librsrt_arm64.bc
+++ b/renderscript/lib/arm64/librsrt_arm64.bc
Binary files differ
diff --git a/renderscript/lib/javalib.jar b/renderscript/lib/javalib.jar
index abda0ca..399c9ac 100644
--- a/renderscript/lib/javalib.jar
+++ b/renderscript/lib/javalib.jar
Binary files differ
diff --git a/renderscript/lib/mips/libRSSupport.so b/renderscript/lib/mips/libRSSupport.so
index 6e9bc10..2d810dd 100755
--- a/renderscript/lib/mips/libRSSupport.so
+++ b/renderscript/lib/mips/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/mips/libRSSupportIO.so b/renderscript/lib/mips/libRSSupportIO.so
index 7af7e48..b90724c 100755
--- a/renderscript/lib/mips/libRSSupportIO.so
+++ b/renderscript/lib/mips/libRSSupportIO.so
Binary files differ
diff --git a/renderscript/lib/mips/libblasV8.so b/renderscript/lib/mips/libblasV8.so
index b369296..6f322af 100644
--- a/renderscript/lib/mips/libblasV8.so
+++ b/renderscript/lib/mips/libblasV8.so
Binary files differ
diff --git a/renderscript/lib/mips/libc.so b/renderscript/lib/mips/libc.so
index 7d384c1..76bd8f7 100755
--- a/renderscript/lib/mips/libc.so
+++ b/renderscript/lib/mips/libc.so
Binary files differ
diff --git a/renderscript/lib/mips/libclcore.bc b/renderscript/lib/mips/libclcore.bc
index c4000db..ee0429a 100644
--- a/renderscript/lib/mips/libclcore.bc
+++ b/renderscript/lib/mips/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/mips/libcompiler_rt.a b/renderscript/lib/mips/libcompiler_rt.a
index f4ad753..231f03f 100644
--- a/renderscript/lib/mips/libcompiler_rt.a
+++ b/renderscript/lib/mips/libcompiler_rt.a
Binary files differ
diff --git a/renderscript/lib/mips/libm.so b/renderscript/lib/mips/libm.so
index 80c348f..b895280 100755
--- a/renderscript/lib/mips/libm.so
+++ b/renderscript/lib/mips/libm.so
Binary files differ
diff --git a/renderscript/lib/mips/librsjni.so b/renderscript/lib/mips/librsjni.so
index 7d062be..cc65d06 100755
--- a/renderscript/lib/mips/librsjni.so
+++ b/renderscript/lib/mips/librsjni.so
Binary files differ
diff --git a/renderscript/lib/mips/librsrt_mips.bc b/renderscript/lib/mips/librsrt_mips.bc
index c4000db..ee0429a 100644
--- a/renderscript/lib/mips/librsrt_mips.bc
+++ b/renderscript/lib/mips/librsrt_mips.bc
Binary files differ
diff --git a/renderscript/lib/x86/libRSSupport.so b/renderscript/lib/x86/libRSSupport.so
index 11e79cf..d776c2f 100755
--- a/renderscript/lib/x86/libRSSupport.so
+++ b/renderscript/lib/x86/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/x86/libRSSupportIO.so b/renderscript/lib/x86/libRSSupportIO.so
index e06b763..4f53b56 100755
--- a/renderscript/lib/x86/libRSSupportIO.so
+++ b/renderscript/lib/x86/libRSSupportIO.so
Binary files differ
diff --git a/renderscript/lib/x86/libblasV8.so b/renderscript/lib/x86/libblasV8.so
index 83ece9e..f987898 100644
--- a/renderscript/lib/x86/libblasV8.so
+++ b/renderscript/lib/x86/libblasV8.so
Binary files differ
diff --git a/renderscript/lib/x86/libc.so b/renderscript/lib/x86/libc.so
index a64d2d7..e4744fb 100755
--- a/renderscript/lib/x86/libc.so
+++ b/renderscript/lib/x86/libc.so
Binary files differ
diff --git a/renderscript/lib/x86/libclcore.bc b/renderscript/lib/x86/libclcore.bc
index c4000db..ee0429a 100644
--- a/renderscript/lib/x86/libclcore.bc
+++ b/renderscript/lib/x86/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/x86/libcompiler_rt.a b/renderscript/lib/x86/libcompiler_rt.a
index 69081e4..1d05f47 100644
--- a/renderscript/lib/x86/libcompiler_rt.a
+++ b/renderscript/lib/x86/libcompiler_rt.a
Binary files differ
diff --git a/renderscript/lib/x86/libm.so b/renderscript/lib/x86/libm.so
index 777e7a7..71b5529 100755
--- a/renderscript/lib/x86/libm.so
+++ b/renderscript/lib/x86/libm.so
Binary files differ
diff --git a/renderscript/lib/x86/librsjni.so b/renderscript/lib/x86/librsjni.so
index c074aba..933a666 100755
--- a/renderscript/lib/x86/librsjni.so
+++ b/renderscript/lib/x86/librsjni.so
Binary files differ
diff --git a/renderscript/lib/x86/librsrt_x86.bc b/renderscript/lib/x86/librsrt_x86.bc
index 46ca71c..dc3bda4 100644
--- a/renderscript/lib/x86/librsrt_x86.bc
+++ b/renderscript/lib/x86/librsrt_x86.bc
Binary files differ
diff --git a/renderscript/lib/x86_64/libRSSupport.so b/renderscript/lib/x86_64/libRSSupport.so
index beef4ab..7d6994d 100755
--- a/renderscript/lib/x86_64/libRSSupport.so
+++ b/renderscript/lib/x86_64/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/x86_64/libRSSupportIO.so b/renderscript/lib/x86_64/libRSSupportIO.so
index c10a6bf..52a1883 100755
--- a/renderscript/lib/x86_64/libRSSupportIO.so
+++ b/renderscript/lib/x86_64/libRSSupportIO.so
Binary files differ
diff --git a/renderscript/lib/x86_64/libblasV8.so b/renderscript/lib/x86_64/libblasV8.so
index 9a8b53c..e4de1d7 100644
--- a/renderscript/lib/x86_64/libblasV8.so
+++ b/renderscript/lib/x86_64/libblasV8.so
Binary files differ
diff --git a/renderscript/lib/x86_64/libc.so b/renderscript/lib/x86_64/libc.so
index a0606de..aa72d09 100755
--- a/renderscript/lib/x86_64/libc.so
+++ b/renderscript/lib/x86_64/libc.so
Binary files differ
diff --git a/renderscript/lib/x86_64/libclcore.bc b/renderscript/lib/x86_64/libclcore.bc
index 8c002e2..0a1d282 100644
--- a/renderscript/lib/x86_64/libclcore.bc
+++ b/renderscript/lib/x86_64/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/x86_64/libcompiler_rt.a b/renderscript/lib/x86_64/libcompiler_rt.a
index 2d5d7b0..66bf3fd 100644
--- a/renderscript/lib/x86_64/libcompiler_rt.a
+++ b/renderscript/lib/x86_64/libcompiler_rt.a
Binary files differ
diff --git a/renderscript/lib/x86_64/libm.so b/renderscript/lib/x86_64/libm.so
index 0b330ed..10bb5fd 100755
--- a/renderscript/lib/x86_64/libm.so
+++ b/renderscript/lib/x86_64/libm.so
Binary files differ
diff --git a/renderscript/lib/x86_64/librsjni.so b/renderscript/lib/x86_64/librsjni.so
index 8247737..7e4c932 100755
--- a/renderscript/lib/x86_64/librsjni.so
+++ b/renderscript/lib/x86_64/librsjni.so
Binary files differ
diff --git a/renderscript/lib/x86_64/librsrt_x86_64.bc b/renderscript/lib/x86_64/librsrt_x86_64.bc
index 7bb8548..239bb97 100644
--- a/renderscript/lib/x86_64/librsrt_x86_64.bc
+++ b/renderscript/lib/x86_64/librsrt_x86_64.bc
Binary files differ
diff --git a/tools/linux/bin/bcc_compat b/tools/linux/bin/bcc_compat
index 6804681..17ebb17 100755
--- a/tools/linux/bin/bcc_compat
+++ b/tools/linux/bin/bcc_compat
Binary files differ
diff --git a/tools/linux/bin/llvm-rs-cc b/tools/linux/bin/llvm-rs-cc
index 87ea63a..f4f46df 100755
--- a/tools/linux/bin/llvm-rs-cc
+++ b/tools/linux/bin/llvm-rs-cc
Binary files differ
diff --git a/tools/linux/lib/libc++.so b/tools/linux/lib/libc++.so
index 98f4090..430f183 100755
--- a/tools/linux/lib/libc++.so
+++ b/tools/linux/lib/libc++.so
Binary files differ
diff --git a/tools/linux/lib64/libLLVM.so b/tools/linux/lib64/libLLVM.so
index 31c6dca..11de592 100755
--- a/tools/linux/lib64/libLLVM.so
+++ b/tools/linux/lib64/libLLVM.so
Binary files differ
diff --git a/tools/linux/lib64/libbcc.so b/tools/linux/lib64/libbcc.so
index 7d43b0b..d654840 100755
--- a/tools/linux/lib64/libbcc.so
+++ b/tools/linux/lib64/libbcc.so
Binary files differ
diff --git a/tools/linux/lib64/libbcinfo.so b/tools/linux/lib64/libbcinfo.so
index 0348f1e..273e9e1 100755
--- a/tools/linux/lib64/libbcinfo.so
+++ b/tools/linux/lib64/libbcinfo.so
Binary files differ
diff --git a/tools/linux/lib64/libc++.so b/tools/linux/lib64/libc++.so
index 964915f..5361b92 100755
--- a/tools/linux/lib64/libc++.so
+++ b/tools/linux/lib64/libc++.so
Binary files differ
diff --git a/tools/linux/lib64/libclang.so b/tools/linux/lib64/libclang.so
index 41af6ad..219783a 100755
--- a/tools/linux/lib64/libclang.so
+++ b/tools/linux/lib64/libclang.so
Binary files differ