[AArch64] Add missing across-vector intrinsics on 64-bit vectors of 32-bit elements, such as:
    int32_t vminv_s32(int32x2_t a)
which should be compiled into the pairwise form SMINP Vd.2S, Vn.2S, Vm.2S
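
A minimal usage sketch, assuming an AArch64 cross toolchain and a hypothetical
file name reduce.c; the SMINP mapping for vminv_s32 is the one stated above,
while the ADDP/SADDLP mappings shown for vaddv_u32 and vaddlv_s32 are
assumptions about the expected lowering, not verified output:

    /* reduce.c -- compile with, e.g.:
     *   clang --target=aarch64-none-linux-gnu -O2 -S reduce.c
     */
    #include <arm_neon.h>

    int32_t min_of_two(int32x2_t a) {
      return vminv_s32(a);   /* expected: sminp v0.2s, v0.2s, v0.2s */
    }

    uint32_t sum_of_two(uint32x2_t a) {
      return vaddv_u32(a);   /* assumed:  addp v0.2s, v0.2s, v0.2s */
    }

    int64_t widening_sum(int32x2_t a) {
      return vaddlv_s32(a);  /* assumed:  saddlp v0.1d, v0.2s (widening pairwise add) */
    }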

llvm-svn: 196750
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 7726ad3..0a83d99 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -2026,6 +2026,7 @@
   // The followings are intrinsics with scalar results generated AcrossVec vectors
   case AArch64::BI__builtin_neon_vaddlv_s8:
   case AArch64::BI__builtin_neon_vaddlv_s16:
+  case AArch64::BI__builtin_neon_vaddlv_s32:
   case AArch64::BI__builtin_neon_vaddlvq_s8:
   case AArch64::BI__builtin_neon_vaddlvq_s16:
   case AArch64::BI__builtin_neon_vaddlvq_s32:
@@ -2033,6 +2034,7 @@
     AcrossVec = true; ExtendEle = true; s = "saddlv"; break;
   case AArch64::BI__builtin_neon_vaddlv_u8:
   case AArch64::BI__builtin_neon_vaddlv_u16:
+  case AArch64::BI__builtin_neon_vaddlv_u32:
   case AArch64::BI__builtin_neon_vaddlvq_u8:
   case AArch64::BI__builtin_neon_vaddlvq_u16:
   case AArch64::BI__builtin_neon_vaddlvq_u32:
@@ -2040,6 +2042,7 @@
     AcrossVec = true; ExtendEle = true; s = "uaddlv"; break;
   case AArch64::BI__builtin_neon_vmaxv_s8:
   case AArch64::BI__builtin_neon_vmaxv_s16:
+  case AArch64::BI__builtin_neon_vmaxv_s32:
   case AArch64::BI__builtin_neon_vmaxvq_s8:
   case AArch64::BI__builtin_neon_vmaxvq_s16:
   case AArch64::BI__builtin_neon_vmaxvq_s32:
@@ -2047,6 +2050,7 @@
     AcrossVec = true; ExtendEle = false; s = "smaxv"; break;
   case AArch64::BI__builtin_neon_vmaxv_u8:
   case AArch64::BI__builtin_neon_vmaxv_u16:
+  case AArch64::BI__builtin_neon_vmaxv_u32:
   case AArch64::BI__builtin_neon_vmaxvq_u8:
   case AArch64::BI__builtin_neon_vmaxvq_u16:
   case AArch64::BI__builtin_neon_vmaxvq_u32:
@@ -2054,6 +2058,7 @@
     AcrossVec = true; ExtendEle = false; s = "umaxv"; break;
   case AArch64::BI__builtin_neon_vminv_s8:
   case AArch64::BI__builtin_neon_vminv_s16:
+  case AArch64::BI__builtin_neon_vminv_s32:
   case AArch64::BI__builtin_neon_vminvq_s8:
   case AArch64::BI__builtin_neon_vminvq_s16:
   case AArch64::BI__builtin_neon_vminvq_s32:
@@ -2061,6 +2066,7 @@
     AcrossVec = true; ExtendEle = false; s = "sminv"; break;
   case AArch64::BI__builtin_neon_vminv_u8:
   case AArch64::BI__builtin_neon_vminv_u16:
+  case AArch64::BI__builtin_neon_vminv_u32:
   case AArch64::BI__builtin_neon_vminvq_u8:
   case AArch64::BI__builtin_neon_vminvq_u16:
   case AArch64::BI__builtin_neon_vminvq_u32:
@@ -2068,12 +2074,14 @@
     AcrossVec = true; ExtendEle = false; s = "uminv"; break;
   case AArch64::BI__builtin_neon_vaddv_s8:
   case AArch64::BI__builtin_neon_vaddv_s16:
+  case AArch64::BI__builtin_neon_vaddv_s32:
   case AArch64::BI__builtin_neon_vaddvq_s8:
   case AArch64::BI__builtin_neon_vaddvq_s16:
   case AArch64::BI__builtin_neon_vaddvq_s32:
   case AArch64::BI__builtin_neon_vaddvq_s64:
   case AArch64::BI__builtin_neon_vaddv_u8:
   case AArch64::BI__builtin_neon_vaddv_u16:
+  case AArch64::BI__builtin_neon_vaddv_u32:
   case AArch64::BI__builtin_neon_vaddvq_u8:
   case AArch64::BI__builtin_neon_vaddvq_u16:
   case AArch64::BI__builtin_neon_vaddvq_u32: