[PowerPC] Add v2i64 as a legal VSX type

v2i64 needs to be a legal VSX type because it is the SetCC result type of
v2f64 comparisons. Since VSX supports only non-arithmetic operations on
v2i64, the arithmetic operations (ADD and SUB) are expanded.

This fixes the lowering for v2f64 VSELECT.
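
As a rough illustration (a hypothetical function in the spirit of test25 in
llvm/test/CodeGen/PowerPC/vsx.ll; the name and signature are illustrative),
a v2f64 select driven by a v2f64 comparison can now lower to
xvcmpeqdp + xxsel rather than being scalarized:

  define <2 x double> @select_v2f64(<2 x double> %a, <2 x double> %b,
                                    <2 x double> %c, <2 x double> %d) {
  entry:
    ; The fcmp produces a <2 x i1> mask; its SetCC result type for v2f64
    ; is v2i64 on VSX, which is why v2i64 must be a legal type.
    %m = fcmp oeq <2 x double> %c, %d
    %v = select <2 x i1> %m, <2 x double> %a, <2 x double> %b
    ret <2 x double> %v
  }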

llvm-svn: 204828
diff --git a/llvm/lib/Target/PowerPC/PPCCallingConv.td b/llvm/lib/Target/PowerPC/PPCCallingConv.td
index 291627a..5e3074b 100644
--- a/llvm/lib/Target/PowerPC/PPCCallingConv.td
+++ b/llvm/lib/Target/PowerPC/PPCCallingConv.td
@@ -36,7 +36,7 @@
   CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4]>>,
   
   // Vector types are always returned in V2.
-  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64], CCAssignToReg<[V2]>>
+  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToReg<[V2]>>
 ]>;
 
 
@@ -70,7 +70,7 @@
   CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
   CCIfType<[f32],  CCAssignToReg<[F1, F2]>>,
   CCIfType<[f64],  CCAssignToReg<[F1, F2, F3, F4]>>,
-  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64], CCAssignToReg<[V2]>>
+  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToReg<[V2]>>
 ]>;
 
 //===----------------------------------------------------------------------===//
@@ -104,7 +104,7 @@
   CCIfType<[f32,f64], CCAssignToStack<8, 8>>,  
 
   // Vectors get 16-byte stack slots that are 16-byte aligned.
-  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64], CCAssignToStack<16, 16>>
+  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>
 ]>;
 
 // This calling convention puts vector arguments always on the stack. It is used
@@ -118,7 +118,7 @@
 // put vector arguments in vector registers before putting them on the stack.
 def CC_PPC32_SVR4 : CallingConv<[
   // The first 12 Vector arguments are passed in AltiVec registers.
-  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64],
+  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64],
            CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13]>>,
            
   CCDelegateTo<CC_PPC32_SVR4_Common>
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 2cc8f46..9690557 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -571,6 +571,12 @@
 
       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
+
+      // VSX v2i64 only supports non-arithmetic operations.
+      setOperationAction(ISD::ADD, MVT::v2i64, Expand);
+      setOperationAction(ISD::SUB, MVT::v2i64, Expand);
+
+      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
     }
   }
 
@@ -2135,6 +2141,7 @@
         case MVT::v4i32:
         case MVT::v4f32:
         case MVT::v2f64:
+        case MVT::v2i64:
           RC = &PPC::VRRCRegClass;
           break;
       }
@@ -2382,7 +2389,7 @@
     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8 ||
-        ObjectVT==MVT::v2f64) {
+        ObjectVT==MVT::v2f64 || ObjectVT==MVT::v2i64) {
       if (isVarArg) {
         MinReservedArea = ((MinReservedArea+15)/16)*16;
         MinReservedArea += CalculateStackSlotSize(ObjectVT,
@@ -2540,6 +2547,7 @@
     case MVT::v8i16:
     case MVT::v16i8:
     case MVT::v2f64:
+    case MVT::v2i64:
       // Note that vector arguments in registers don't reserve stack space,
       // except in varargs functions.
       if (VR_idx != Num_VR_Regs) {
@@ -3003,7 +3011,7 @@
     // Varargs Altivec parameters are padded to a 16 byte boundary.
     if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
         ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8 ||
-        ArgVT==MVT::v2f64) {
+        ArgVT==MVT::v2f64 || ArgVT==MVT::v2i64) {
       if (!isVarArg && !isPPC64) {
         // Non-varargs Altivec parameters go after all the non-Altivec
         // parameters; handle those later so we know how much padding we need.
@@ -4188,6 +4196,7 @@
     case MVT::v8i16:
     case MVT::v16i8:
     case MVT::v2f64:
+    case MVT::v2i64:
       if (isVarArg) {
         // These go aligned on the stack, or in the corresponding R registers
         // when within range.  The Darwin PPC ABI doc claims they also go in
diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
index 2762da6..8593ad2 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
@@ -761,6 +761,20 @@
 def : Pat<(v16i8 (bitconvert v2f64:$A)),
           (COPY_TO_REGCLASS $A, VRRC)>;
 
+def : Pat<(v2i64 (bitconvert v4i32:$A)),
+          (COPY_TO_REGCLASS $A, VSRC)>;
+def : Pat<(v2i64 (bitconvert v8i16:$A)),
+          (COPY_TO_REGCLASS $A, VSRC)>;
+def : Pat<(v2i64 (bitconvert v16i8:$A)),
+          (COPY_TO_REGCLASS $A, VSRC)>;
+
+def : Pat<(v4i32 (bitconvert v2i64:$A)),
+          (COPY_TO_REGCLASS $A, VRRC)>;
+def : Pat<(v8i16 (bitconvert v2i64:$A)),
+          (COPY_TO_REGCLASS $A, VRRC)>;
+def : Pat<(v16i8 (bitconvert v2i64:$A)),
+          (COPY_TO_REGCLASS $A, VRRC)>;
+
 } // AddedComplexity
 } // HasVSX
 
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.td b/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
index 339d4e4d..dab222b 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -235,16 +235,16 @@
 
 // VSX register classes (the allocation order mirrors that of the corresponding
 // subregister classes).
-def VSLRC : RegisterClass<"PPC", [v4i32,v4f32,f64,v2f64], 128,
+def VSLRC : RegisterClass<"PPC", [v4i32,v4f32,f64,v2f64,v2i64], 128,
                           (add (sequence "VSL%u", 0, 13),
                                (sequence "VSL%u", 31, 14))>;
-def VSHRC : RegisterClass<"PPC", [v4i32,v4f32,f64,v2f64], 128,
+def VSHRC : RegisterClass<"PPC", [v4i32,v4f32,f64,v2f64,v2i64], 128,
                           (add VSH2, VSH3, VSH4, VSH5, VSH0, VSH1, VSH6, VSH7,
 			       VSH8, VSH9, VSH10, VSH11, VSH12, VSH13, VSH14,
                                VSH15, VSH16, VSH17, VSH18, VSH19, VSH31, VSH30,
                                VSH29, VSH28, VSH27, VSH26, VSH25, VSH24, VSH23,
                                VSH22, VSH21, VSH20)>;
-def VSRC  : RegisterClass<"PPC", [v4i32,v4f32,f64,v2f64], 128,
+def VSRC  : RegisterClass<"PPC", [v4i32,v4f32,f64,v2f64,v2i64], 128,
                           (add VSLRC, VSHRC)>;
 
 def CRBITRC : RegisterClass<"PPC", [i1], 32,
diff --git a/llvm/test/CodeGen/PowerPC/vsx.ll b/llvm/test/CodeGen/PowerPC/vsx.ll
index b94c80c..e2b78c7 100644
--- a/llvm/test/CodeGen/PowerPC/vsx.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx.ll
@@ -271,7 +271,28 @@
   ret <2 x double> %v
 
 ; CHECK-LABEL: @test25
-; FIXME: This currently is scalarized because v2i64 is not a legal type.
+; CHECK: xvcmpeqdp [[V1:[0-9]+]], 36, 37
+; CHECK: xxsel 34, 35, 34, [[V1]]
+; CHECK: blr
+}
+
+define <2 x i64> @test26(<2 x i64> %a, <2 x i64> %b) {
+  %v = add <2 x i64> %a, %b
+  ret <2 x i64> %v
+
+; CHECK-LABEL: @test26
+; FIXME: The code quality here is not good; just make sure we do something for now.
+; CHECK: add
+; CHECK: add
+; CHECK: blr
+}
+
+define <2 x i64> @test27(<2 x i64> %a, <2 x i64> %b) {
+  %v = and <2 x i64> %a, %b
+  ret <2 x i64> %v
+
+; CHECK-LABEL: @test27
+; CHECK: xxland 34, 34, 35
 ; CHECK: blr
 }