Add missing builtins to altivec.h for ABI compliance (vol. 2)

This patch corresponds to review:
http://reviews.llvm.org/D10875

The bulk of the second round of additions to altivec.h.
The following interfaces were added:
vector double vec_floor(vector double)
vector double vec_madd(vector double, vector double, vector double)
vector float vec_msub(vector float, vector float, vector float)
vector double vec_msub(vector double, vector double, vector double)
vector float vec_mul(vector float, vector float)
vector double vec_mul(vector double, vector double)
vector float vec_nmadd(vector float, vector float, vector float)
vector double vec_nmadd(vector double, vector double, vector double)
vector double vec_nmsub(vector double, vector double, vector double)
vector double vec_nor(vector double, vector double)
vector double vec_or(vector double, vector double)
vector float vec_rint(vector float)
vector double vec_rint(vector double)
vector float vec_nearbyint(vector float)
vector double vec_nearbyint(vector double)
vector float vec_sqrt(vector float)
vector double vec_sqrt(vector double)
vector double vec_rsqrte(vector double)
vector double vec_sel(vector double, vector double, vector bool long long)
vector double vec_sel(vector double, vector double, vector unsigned long long)
vector double vec_sub(vector double, vector double)
vector double vec_trunc(vector double)
vector double vec_xor(vector double, vector double)
vector double vec_xor(vector double, vector bool long long)
vector double vec_xor(vector bool long long, vector double)
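
A brief usage sketch of a few of the new double-precision overloads
(illustrative only; the function name and build flags are assumptions,
e.g. -mvsx on a POWER7 or later target):

  #include <altivec.h>

  /* vec_madd maps to xvmaddadp and vec_sqrt to xvsqrtdp; vec_sel does a
     bitwise select, taking bits from the second operand where the mask
     is set. */
  vector double fma_then_select(vector double a, vector double b,
                                vector double c, vector bool long long m) {
    vector double t = vec_madd(a, b, c);
    return vec_sel(t, vec_sqrt(c), m);
  }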

New VSX paths for the following interfaces:
vector float vec_madd(vector float, vector float, vector float)
vector float vec_nmsub(vector float, vector float, vector float)
vector float vec_rsqrte(vector float)
vector float vec_trunc(vector float)
vector float vec_floor(vector float)
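
Likewise, a minimal sketch of single-precision interfaces that now take a
VSX path (the function name is an assumption): with -mvsx this lowers via
xvrsqrtesp and xvrspiz, while without VSX the original vrsqrtefp/vrfiz
builtins are still used.

  #include <altivec.h>

  /* Estimate reciprocal square roots, then truncate toward zero. */
  vector float recip_sqrt_trunc(vector float v) {
    return vec_trunc(vec_rsqrte(v));
  }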

llvm-svn: 241399
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index 2c80e24..61ad320 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -1857,11 +1857,20 @@
 
 /* vec_floor */
 
-static vector float __attribute__((__always_inline__))
-vec_floor(vector float __a) {
+static vector float __ATTRS_o_ai vec_floor(vector float __a) {
+#ifdef __VSX__
+  return __builtin_vsx_xvrspim(__a);
+#else
   return __builtin_altivec_vrfim(__a);
+#endif
 }
 
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_floor(vector double __a) {
+  return __builtin_vsx_xvrdpim(__a);
+}
+#endif
+
 /* vec_vrfim */
 
 static vector float __attribute__((__always_inline__))
@@ -2532,11 +2541,22 @@
 
 /* vec_madd */
 
-static vector float __attribute__((__always_inline__))
+static vector float __ATTRS_o_ai
 vec_madd(vector float __a, vector float __b, vector float __c) {
+#ifdef __VSX__
+  return __builtin_vsx_xvmaddasp(__a, __b, __c);
+#else
   return __builtin_altivec_vmaddfp(__a, __b, __c);
+#endif
 }
 
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_madd(vector double __a, vector double __b, vector double __c) {
+  return __builtin_vsx_xvmaddadp(__a, __b, __c);
+}
+#endif
+
 /* vec_vmaddfp */
 
 static vector float __attribute__((__always_inline__))
@@ -2559,6 +2579,20 @@
   return __builtin_altivec_vmhaddshs(__a, __b, __c);
 }
 
+/* vec_msub */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai
+vec_msub(vector float __a, vector float __b, vector float __c) {
+  return __builtin_vsx_xvmsubasp(__a, __b, __c);
+}
+
+static vector double __ATTRS_o_ai
+vec_msub(vector double __a, vector double __b, vector double __c) {
+  return __builtin_vsx_xvmsubadp(__a, __b, __c);
+}
+#endif
+
 /* vec_max */
 
 static vector signed char __ATTRS_o_ai vec_max(vector signed char __a,
@@ -3677,6 +3711,18 @@
   __builtin_altivec_mtvscr((vector int)__a);
 }
 
+/* vec_mul */
+static vector float __ATTRS_o_ai vec_mul(vector float __a, vector float __b) {
+  return __a * __b;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_mul(vector double __a, vector double __b) {
+  return __a * __b;
+}
+#endif
+
 /* The vmulos* and vmules* instructions have a big endian bias, so
    we must reverse the meaning of "even" and "odd" for little endian.  */
 
@@ -3882,13 +3928,38 @@
 #endif
 }
 
+/* vec_nmadd */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai
+vec_nmadd(vector float __a, vector float __b, vector float __c) {
+  return __builtin_vsx_xvnmaddasp(__a, __b, __c);
+}
+
+static vector double __ATTRS_o_ai
+vec_nmadd(vector double __a, vector double __b, vector double __c) {
+  return __builtin_vsx_xvnmaddadp(__a, __b, __c);
+}
+#endif
+
 /* vec_nmsub */
 
-static vector float __attribute__((__always_inline__))
+static vector float __ATTRS_o_ai
 vec_nmsub(vector float __a, vector float __b, vector float __c) {
+#ifdef __VSX__
+  return __builtin_vsx_xvnmsubasp(__a, __b, __c);
+#else
   return __builtin_altivec_vnmsubfp(__a, __b, __c);
+#endif
 }
 
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_nmsub(vector double __a, vector double __b, vector double __c) {
+  return __builtin_vsx_xvnmsubadp(__a, __b, __c);
+}
+#endif
+
 /* vec_vnmsubfp */
 
 static vector float __attribute__((__always_inline__))
@@ -3949,6 +4020,15 @@
   return (vector float)__res;
 }
 
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_nor(vector double __a, vector double __b) {
+  vector unsigned long long __res =
+      ~((vector unsigned long long)__a | (vector unsigned long long)__b);
+  return (vector double)__res;
+}
+#endif
+
 /* vec_vnor */
 
 static vector signed char __ATTRS_o_ai vec_vnor(vector signed char __a,
@@ -4141,6 +4221,12 @@
 }
 
 #ifdef __VSX__
+static vector double __ATTRS_o_ai vec_or(vector double __a, vector double __b) {
+  vector unsigned long long __res =
+      (vector unsigned long long)__a | (vector unsigned long long)__b;
+  return (vector double)__res;
+}
+
 static vector signed long long __ATTRS_o_ai
 vec_or(vector signed long long __a, vector signed long long __b) {
   return __a | __b;
@@ -5237,6 +5323,30 @@
   return __builtin_altivec_vrfin(__a);
 }
 
+#ifdef __VSX__
+/* vec_rint */
+
+static vector float __ATTRS_o_ai
+vec_rint(vector float __a) {
+  return __builtin_vsx_xvrspic(__a);
+}
+
+static vector double __ATTRS_o_ai
+vec_rint(vector double __a) {
+  return __builtin_vsx_xvrdpic(__a);
+}
+
+/* vec_nearbyint */
+
+static vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
+  return __builtin_vsx_xvrspi(__a);
+}
+
+static vector double __ATTRS_o_ai vec_nearbyint(vector double __a) {
+  return __builtin_vsx_xvrdpi(__a);
+}
+#endif
+
 /* vec_vrfin */
 
 static vector float __attribute__((__always_inline__))
@@ -5244,13 +5354,35 @@
   return __builtin_altivec_vrfin(__a);
 }
 
+/* vec_sqrt */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai vec_sqrt(vector float __a) {
+  return __builtin_vsx_xvsqrtsp(__a);
+}
+
+static vector double __ATTRS_o_ai vec_sqrt(vector double __a) {
+  return __builtin_vsx_xvsqrtdp(__a);
+}
+#endif
+
 /* vec_rsqrte */
 
-static __vector float __attribute__((__always_inline__))
+static vector float __ATTRS_o_ai
 vec_rsqrte(vector float __a) {
+#ifdef __VSX__
+  return __builtin_vsx_xvrsqrtesp(__a);
+#else
   return __builtin_altivec_vrsqrtefp(__a);
+#endif
 }
 
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
+  return __builtin_vsx_xvrsqrtedp(__a);
+}
+#endif
+
 /* vec_vrsqrtefp */
 
 static __vector float __attribute__((__always_inline__))
@@ -5381,6 +5513,22 @@
   return (vector float)__res;
 }
 
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_sel(vector double __a, vector double __b,
+                                          vector bool long long __c) {
+  vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
+                     ((vector long long)__b & (vector long long)__c);
+  return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai vec_sel(vector double __a, vector double __b,
+                                          vector unsigned long long __c) {
+  vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
+                     ((vector long long)__b & (vector long long)__c);
+  return (vector double)__res;
+}
+#endif
+
 /* vec_vsel */
 
 static vector signed char __ATTRS_o_ai vec_vsel(vector signed char __a,
@@ -7960,6 +8108,13 @@
   return __a - __b;
 }
 
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_sub(vector double __a, vector double __b) {
+  return __a - __b;
+}
+#endif
+
 /* vec_vsububm */
 
 #define __builtin_altivec_vsububm vec_vsububm
@@ -8451,11 +8606,21 @@
 
 /* vec_trunc */
 
-static vector float __attribute__((__always_inline__))
+static vector float __ATTRS_o_ai
 vec_trunc(vector float __a) {
+#ifdef __VSX__
+  return __builtin_vsx_xvrspiz(__a);
+#else
   return __builtin_altivec_vrfiz(__a);
+#endif
 }
 
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_trunc(vector double __a) {
+  return __builtin_vsx_xvrdpiz(__a);
+}
+#endif
+
 /* vec_vrfiz */
 
 static vector float __attribute__((__always_inline__))
@@ -8945,6 +9110,24 @@
                                                   vector bool long long __b) {
   return __a ^ __b;
 }
+
+static vector double __ATTRS_o_ai
+vec_xor(vector double __a, vector double __b) {
+  return (vector double)((vector unsigned long long)__a ^
+                          (vector unsigned long long)__b);
+}
+
+static vector double __ATTRS_o_ai
+vec_xor(vector double __a, vector bool long long __b) {
+  return (vector double)((vector unsigned long long)__a ^
+                         (vector unsigned long long)__b);
+}
+
+static vector double __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector double __b) {
+  return (vector double)((vector unsigned long long)__a ^
+                         (vector unsigned long long)__b);
+}
 #endif
 
 /* vec_vxor */