Revert BuildVectorSDNode-related patches: 65426, 65427, and 65296.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@65482 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 6af60da..2c97b99 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3093,6 +3093,100 @@
 // Vector related lowering.
 //
 
+// If this is a vector of constants or undefs, get the bits.  A bit in
+// UndefBits is set if the corresponding element of the vector is an
+// ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
+// zero.   Return true if this is not an array of constants, false if it is.
+//
+static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
+                                       uint64_t UndefBits[2]) {
+  // Start with zero'd results.
+  VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;
+
+  unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits();
+  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
+    SDValue OpVal = BV->getOperand(i);
+
+    unsigned PartNo = i >= e/2;     // In the second 64-bit half?
+    unsigned SlotNo = e/2 - (i & (e/2-1))-1;  // Which subpiece of the uint64_t.
+
+    uint64_t EltBits = 0;
+    if (OpVal.getOpcode() == ISD::UNDEF) {
+      uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
+      UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
+      continue;
+    } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
+      EltBits = CN->getZExtValue() & (~0U >> (32-EltBitSize));
+    } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
+      assert(CN->getValueType(0) == MVT::f32 &&
+             "Only one legal FP vector type!");
+      EltBits = FloatToBits(CN->getValueAPF().convertToFloat());
+    } else {
+      // Nonconstant element.
+      return true;
+    }
+
+    VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
+  }
+
+  //printf("%llx %llx  %llx %llx\n",
+  //       VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
+  return false;
+}
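
A quick reference for the PartNo/SlotNo arithmetic above: elements are packed
big-endian, so earlier elements land in the higher-order slots of the two
64-bit words.  A minimal standalone model of that packing (plain C++, no
SelectionDAG types; packBits and the sample element values are illustrative):

#include <cassert>
#include <cstdint>

// Pack e elements of EltBitSize bits each (e * EltBitSize == 128) into two
// uint64_t words, mirroring the PartNo/SlotNo arithmetic above.
static void packBits(const uint64_t *Elts, unsigned e, unsigned EltBitSize,
                     uint64_t Bits[2]) {
  Bits[0] = Bits[1] = 0;
  for (unsigned i = 0; i != e; ++i) {
    unsigned PartNo = i >= e/2;                 // which 64-bit word
    unsigned SlotNo = e/2 - (i & (e/2-1)) - 1;  // slot within that word
    uint64_t Elt = Elts[i] & (~0U >> (32 - EltBitSize));
    Bits[PartNo] |= Elt << (SlotNo * EltBitSize);
  }
}

int main() {
  // A v4i32 build_vector <0x11111111, 0x22222222, 0x33333333, 0x44444444>.
  uint64_t Elts[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
  uint64_t Bits[2];
  packBits(Elts, 4, 32, Bits);
  assert(Bits[0] == 0x1111111122222222ULL);  // element 0 takes the high slot
  assert(Bits[1] == 0x3333333344444444ULL);
  return 0;
}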
+
+// If this is a splat (repetition) of a value across the whole vector, return
+// the smallest size that splats it.  For example, "0x01010101010101..." is a
+// splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
+// SplatSize = 1 byte.
+static bool isConstantSplat(const uint64_t Bits128[2],
+                            const uint64_t Undef128[2],
+                            unsigned &SplatBits, unsigned &SplatUndef,
+                            unsigned &SplatSize) {
+
+  // Don't let undefs prevent splats from matching.  See if the top 64-bits are
+  // the same as the lower 64-bits, ignoring undefs.
+  if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
+    return false;  // Can't be a splat if two pieces don't match.
+
+  uint64_t Bits64  = Bits128[0] | Bits128[1];
+  uint64_t Undef64 = Undef128[0] & Undef128[1];
+
+  // Check that the top 32-bits are the same as the lower 32-bits, ignoring
+  // undefs.
+  if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
+    return false;  // Can't be a splat if two pieces don't match.
+
+  uint32_t Bits32  = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
+  uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);
+
+  // If the top 16-bits are different than the lower 16-bits, ignoring
+  // undefs, we have an i32 splat.
+  if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) {
+    SplatBits = Bits32;
+    SplatUndef = Undef32;
+    SplatSize = 4;
+    return true;
+  }
+
+  uint16_t Bits16  = uint16_t(Bits32)  | uint16_t(Bits32 >> 16);
+  uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);
+
+  // If the top 8-bits are different than the lower 8-bits, ignoring
+  // undefs, we have an i16 splat.
+  if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) {
+    SplatBits = Bits16;
+    SplatUndef = Undef16;
+    SplatSize = 2;
+    return true;
+  }
+
+  // Otherwise, we have an 8-bit splat.
+  SplatBits  = uint8_t(Bits16)  | uint8_t(Bits16 >> 8);
+  SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
+  SplatSize = 1;
+  return true;
+}
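
The checks above require the value to repeat at 32-bit granularity and then
report the smallest width (4, 2, or 1 bytes) at which it still repeats.  A
rough standalone model, ignoring the undef masks for brevity (plain C++;
splatSize is an illustrative name, returning the splat size in bytes):

#include <cassert>
#include <cstdint>

// Given a 128-bit constant as two 64-bit words (no undefs), return the
// smallest splat element size in bytes (4, 2, or 1), or 0 if the value does
// not splat at 32-bit or smaller granularity.
static unsigned splatSize(uint64_t Hi, uint64_t Lo, uint32_t &SplatBits) {
  if (Hi != Lo)
    return 0;                               // 64-bit halves differ: no splat
  if (uint32_t(Lo) != uint32_t(Lo >> 32))
    return 0;                               // 32-bit pieces differ: no splat
  uint32_t Bits32 = uint32_t(Lo);
  if (uint16_t(Bits32) != uint16_t(Bits32 >> 16)) {
    SplatBits = Bits32;                     // repeats only every 32 bits
    return 4;
  }
  uint16_t Bits16 = uint16_t(Bits32);
  if (uint8_t(Bits16) != uint8_t(Bits16 >> 8)) {
    SplatBits = Bits16;                     // repeats only every 16 bits
    return 2;
  }
  SplatBits = uint8_t(Bits16);              // repeats every 8 bits
  return 1;
}

int main() {
  uint32_t B;
  assert(splatSize(0x0101010101010101ULL, 0x0101010101010101ULL, B) == 1 &&
         B == 0x01);
  assert(splatSize(0x0001000100010001ULL, 0x0001000100010001ULL, B) == 2 &&
         B == 0x0001);
  assert(splatSize(0x1234567812345678ULL, 0x1234567812345678ULL, B) == 4 &&
         B == 0x12345678);
  assert(splatSize(0x0000000000000001ULL, 0x0000000000000001ULL, B) == 0);
  return 0;
}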
+
 /// BuildSplatI - Build a canonical splati of Val with an element size of
 /// SplatSize.  Cast the result to VT.
 static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT,
@@ -3115,7 +3209,8 @@
   SDValue Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType());
   SmallVector<SDValue, 8> Ops;
   Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
-  SDValue Res = DAG.getBUILD_VECTOR(CanonicalVT, dl, &Ops[0], Ops.size());
+  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
+                              &Ops[0], Ops.size());
   return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res);
 }
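
A rough plain-C++ stand-in for what BuildSplatI produces: Val replicated into
every lane of the canonical splat type, then handed back through a
BIT_CONVERT to the requested type.  buildSplat and the 16/SplatSize lane
count are modeling assumptions, not SelectionDAG code:

#include <cassert>
#include <cstdint>
#include <cstring>

// Replicate the low SplatSize bytes of Val across a 128-bit value
// (16/SplatSize lanes); the later bitcast to the requested vector type is a
// no-op on the bytes.
static void buildSplat(int64_t Val, unsigned SplatSize, uint8_t Out[16]) {
  for (unsigned Lane = 0; Lane != 16 / SplatSize; ++Lane)
    std::memcpy(Out + Lane * SplatSize, &Val, SplatSize);  // little-endian host
}

int main() {
  uint8_t V[16];
  buildSplat(-1, 2, V);          // splat -1 at halfword granularity
  for (unsigned i = 0; i != 16; ++i)
    assert(V[i] == 0xFF);

  uint32_t AsWords[4];
  std::memcpy(AsWords, V, 16);   // view the same 128 bits as v4i32
  for (unsigned i = 0; i != 4; ++i)
    assert(AsWords[i] == 0xFFFFFFFFu);
  return 0;
}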
 
@@ -3152,7 +3247,7 @@
   for (unsigned i = 0; i != 16; ++i)
     Ops[i] = DAG.getConstant(i+Amt, MVT::i8);
   SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, LHS, RHS,
-                            DAG.getBUILD_VECTOR(MVT::v16i8, dl, Ops,16));
+                        DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops,16));
   return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
 }
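
The mask built above (byte i selects byte i+Amt) makes BuildVSLDOI a left
shift of the LHS||RHS byte pair by Amt bytes, which is the effect of vsldoi.
A small standalone model of that byte selection (plain C++; vsldoiBytes is an
illustrative name, and element-order details of the real shuffle node are set
aside):

#include <cassert>
#include <cstdint>

// Result byte i selects byte i+Amt from the 32-byte concatenation of LHS and
// RHS, mirroring Ops[i] = i + Amt above.
static void vsldoiBytes(const uint8_t LHS[16], const uint8_t RHS[16],
                        unsigned Amt, uint8_t Out[16]) {
  uint8_t Concat[32];
  for (unsigned i = 0; i != 16; ++i) {
    Concat[i] = LHS[i];
    Concat[i + 16] = RHS[i];
  }
  for (unsigned i = 0; i != 16; ++i)
    Out[i] = Concat[i + Amt];
}

int main() {
  uint8_t LHS[16], RHS[16], Out[16];
  for (unsigned i = 0; i != 16; ++i) {
    LHS[i] = uint8_t(i);       // bytes 0..15
    RHS[i] = uint8_t(16 + i);  // bytes 16..31
  }
  vsldoiBytes(LHS, RHS, 4, Out);
  assert(Out[0] == 4);    // first four LHS bytes shifted out
  assert(Out[11] == 15);  // last LHS byte
  assert(Out[12] == 16);  // first RHS byte shifted in
  return 0;
}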
 
@@ -3167,20 +3262,20 @@
   // UndefBits is set if the corresponding element of the vector is an
   // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
   // zero.
+  uint64_t VectorBits[2];
+  uint64_t UndefBits[2];
   DebugLoc dl = Op.getDebugLoc();
-  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
-  assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
-
-  uint64_t SplatBits;
-  uint64_t SplatUndef;
-  unsigned SplatSize;
-  bool HasAnyUndefs;
+  if (GetConstantBuildVectorBits(Op.getNode(), VectorBits, UndefBits))
+    return SDValue();   // Not a constant vector.
 
   // If this is a splat (repetition) of a value across the whole vector, return
   // the smallest size that splats it.  For example, "0x01010101010101..." is a
   // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
   // SplatSize = 1 byte.
-  if (BVN->isConstantSplat(HasAnyUndefs, SplatBits, SplatUndef, SplatSize)) {
+  unsigned SplatBits, SplatUndef, SplatSize;
+  if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
+    bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;
+
     // First, handle single instruction cases.
 
     // All zeros?
@@ -3188,7 +3283,7 @@
       // Canonicalize all zero vectors to be v4i32.
       if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
         SDValue Z = DAG.getConstant(0, MVT::i32);
-        Z = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Z, Z, Z, Z);
+        Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
         Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z);
       }
       return Op;
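
The splat path above keys off two cheap tests: whether any lane was undef and
whether the defined bits are all zero (the zero test itself sits in lines
outside this hunk).  A small standalone sketch of the undef bookkeeping
(plain C++; hasAnyUndefs is an illustrative name mirroring the HasAnyUndefs
line above):

#include <cassert>
#include <cstdint>

// Undef lanes pack as zero bits in VectorBits but as set bits in UndefBits,
// so a single OR over the two undef words says whether the build_vector had
// any undef element -- which is what forces re-emitting an all-zeros vector
// as a canonical v4i32 zero above.
static bool hasAnyUndefs(const uint64_t UndefBits[2]) {
  return (UndefBits[0] | UndefBits[1]) != 0;
}

int main() {
  const uint64_t NoUndefs[2] = { 0, 0 };
  const uint64_t OneUndefLane[2] = { 0, 0xFFFFFFFF00000000ULL };  // one undef i32
  assert(!hasAnyUndefs(NoUndefs));
  assert(hasAnyUndefs(OneUndefLane));
  return 0;
}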
@@ -3401,7 +3496,7 @@
 
   return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, OpLHS.getValueType(),
                      OpLHS, OpRHS,
-                     DAG.getBUILD_VECTOR(MVT::v16i8, dl, Ops, 16));
+                     DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
 }
 
 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
@@ -3524,8 +3619,8 @@
                                            MVT::i8));
   }
 
-  SDValue VPermMask = DAG.getBUILD_VECTOR(MVT::v16i8, dl,
-                                          &ResultMask[0], ResultMask.size());
+  SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
+                                    &ResultMask[0], ResultMask.size());
   return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
 }
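
The vperm mask is byte-granular: each element index of the shuffle expands to
BytesPerElement consecutive byte indices into the 32-byte V1||V2
concatenation, with indices 16-31 selecting V2.  The expansion loop itself
sits above this hunk, so the sketch below is an assumption about its effect
(plain C++; buildVPermMask is an illustrative name):

#include <cassert>
#include <cstdint>
#include <vector>

// Expand an element-level shuffle mask into the byte-level permutation that
// vperm consumes.
static std::vector<uint8_t> buildVPermMask(const std::vector<unsigned> &EltMask,
                                           unsigned BytesPerElement) {
  std::vector<uint8_t> Mask;
  for (unsigned Elt : EltMask)
    for (unsigned j = 0; j != BytesPerElement; ++j)
      Mask.push_back(uint8_t(Elt * BytesPerElement + j));
  return Mask;
}

int main() {
  // A v4i32 shuffle selecting elements <1, 4, 2, 7>, where 0-3 index V1 and
  // 4-7 index V2.
  std::vector<uint8_t> M = buildVPermMask({1, 4, 2, 7}, 4);
  assert(M.size() == 16);
  assert(M[0] == 4 && M[3] == 7);    // element 1 of V1 -> bytes 4..7
  assert(M[4] == 16 && M[7] == 19);  // element 0 of V2 -> bytes 16..19
  return 0;
}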
 
@@ -3713,7 +3808,7 @@
       Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
     }
     return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, EvenParts, OddParts,
-                       DAG.getBUILD_VECTOR(MVT::v16i8, dl, Ops, 16));
+                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
   } else {
     assert(0 && "Unknown mul to lower!");
     abort();