Replace r169459 with something safer. Rather than having computeMaskedBits
understand each target's implementation of any_extend / extload, just generate
zero_extend in place of any_extend for liveouts when the target knows the
zero_extend will be implicit (e.g. ARM ldrb / ldrh) or folded (e.g. x86 movz).

rdar://12771555
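
For reference, the effect at a liveout copy is to choose the extend opcode
based on the new hook. A minimal sketch of the idea (TLI, DAG, Val, DestVT,
and DL are assumed in scope; this is illustrative, not the exact call site
changed by this commit):

  // If the target zero-extends implicitly (or folds the zext into the
  // load), prefer ZERO_EXTEND so downstream users of the liveout can
  // rely on the known-zero high bits without target-specific hooks.
  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
  if (TLI.isZExtFree(Val, DestVT))  // new SDValue overload added below
    ExtendKind = ISD::ZERO_EXTEND;
  SDValue Ext = DAG.getNode(ExtendKind, DL, DestVT, Val);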


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@169536 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index c0a7853..32235b9 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -9462,6 +9462,27 @@
   return MVT::Other;
 }
 
+bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+  if (Val.getOpcode() != ISD::LOAD)
+    return false;
+
+  EVT VT1 = Val.getValueType();
+  if (!VT1.isSimple() || !VT1.isInteger() ||
+      !VT2.isSimple() || !VT2.isInteger())
+    return false;
+
+  switch (VT1.getSimpleVT().SimpleTy) {
+  default: break;
+  case MVT::i1:
+  case MVT::i8:
+  case MVT::i16:
+    // 1-, 8-, and 16-bit loads implicitly zero-extend to 32 bits.
+    return true;
+  }
+
+  return false;
+}
+
 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
   if (V < 0)
     return false;
@@ -9878,36 +9899,6 @@
   }
 }
 
-void ARMTargetLowering::computeMaskedBitsForAnyExtend(const SDValue Op,
-                                                      APInt &KnownZero,
-                                                      APInt &KnownOne,
-                                                      const SelectionDAG &DAG,
-                                                      unsigned Depth) const {
-  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
-  if (Op.getOpcode() == ISD::ANY_EXTEND) {
-    // Implemented as a zero_extend.
-    EVT InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = InVT.getScalarType().getSizeInBits();
-    KnownZero = KnownZero.trunc(InBits);
-    KnownOne = KnownOne.trunc(InBits);
-    DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
-    KnownZero = KnownZero.zext(BitWidth);
-    KnownOne = KnownOne.zext(BitWidth);
-    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
-    KnownZero |= NewBits;
-    return;
-  } else if (ISD::isEXTLoad(Op.getNode())) {
-    // Implemented as zextloads.
-    LoadSDNode *LD = cast<LoadSDNode>(Op);
-    EVT VT = LD->getMemoryVT();
-    unsigned MemBits = VT.getScalarType().getSizeInBits();
-    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
-    return;
-  }
-  
-  assert(0 && "Expecting an ANY_EXTEND or extload!");
-}
-
 //===----------------------------------------------------------------------===//
 //                           ARM Inline Assembly Support
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index bde2ad4..8f7b593 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -294,6 +294,8 @@
                                     bool MemcpyStrSrc,
                                     MachineFunction &MF) const;
 
+    virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+
     /// isLegalAddressingMode - Return true if the addressing mode represented
     /// by AM is legal for this target, for a load/store of the specified type.
     virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
@@ -333,11 +335,6 @@
                                                 const SelectionDAG &DAG,
                                                 unsigned Depth) const;
 
-    virtual void computeMaskedBitsForAnyExtend(const SDValue Op,
-                                               APInt &KnownZero,
-                                               APInt &KnownOne,
-                                               const SelectionDAG &DAG,
-                                               unsigned Depth) const;
 
     virtual bool ExpandInlineAsm(CallInst *CI) const;
 
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 4ed8d1b..e59f619 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -12142,6 +12142,30 @@
   return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
 }
 
+bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+  EVT VT1 = Val.getValueType();
+  if (isZExtFree(VT1, VT2))
+    return true;
+
+  if (Val.getOpcode() != ISD::LOAD)
+    return false;
+
+  if (!VT1.isSimple() || !VT1.isInteger() ||
+      !VT2.isSimple() || !VT2.isInteger())
+    return false;
+
+  switch (VT1.getSimpleVT().SimpleTy) {
+  default: break;
+  case MVT::i8:
+  case MVT::i16:
+  case MVT::i32:
+    // X86 has 8-, 16-, and 32-bit zero-extending loads.
+    return true;
+  }
+
+  return false;
+}
+
 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
   // i16 instructions are longer (0x66 prefix) and potentially slower.
   return !(VT1 == MVT::i32 && VT2 == MVT::i16);
@@ -14093,38 +14117,6 @@
   }
 }
 
-void X86TargetLowering::computeMaskedBitsForAnyExtend(const SDValue Op,
-                                                      APInt &KnownZero,
-                                                      APInt &KnownOne,
-                                                      const SelectionDAG &DAG,
-                                                      unsigned Depth) const {
-  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
-  if (Op.getOpcode() == ISD::ANY_EXTEND) {
-    // Implemented as a zero_extend except for i16 -> i32
-    EVT InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = InVT.getScalarType().getSizeInBits();
-    KnownZero = KnownZero.trunc(InBits);
-    KnownOne = KnownOne.trunc(InBits);
-    DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
-    KnownZero = KnownZero.zext(BitWidth);
-    KnownOne = KnownOne.zext(BitWidth);
-    if (BitWidth != 32 || InBits != 16) {
-      APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
-      KnownZero |= NewBits;
-    }
-    return;
-  } else if (ISD::isEXTLoad(Op.getNode())) {
-    // Implemented as zextloads or implicitly zero-extended (i32 -> i64)
-    LoadSDNode *LD = cast<LoadSDNode>(Op);
-    EVT VT = LD->getMemoryVT();
-    unsigned MemBits = VT.getScalarType().getSizeInBits();
-    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
-    return;
-  }
-  
-  assert(0 && "Expecting an ANY_EXTEND or extload!");
-}
-
 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                          unsigned Depth) const {
   // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 099f1d8..1042fe1 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -558,12 +558,6 @@
                                                 const SelectionDAG &DAG,
                                                 unsigned Depth = 0) const;
 
-    virtual void computeMaskedBitsForAnyExtend(const SDValue Op,
-                                               APInt &KnownZero,
-                                               APInt &KnownOne,
-                                               const SelectionDAG &DAG,
-                                               unsigned Depth) const;
-
     // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
     // operation that are sign bits.
     virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
@@ -634,6 +628,7 @@
     /// result out to 64 bits.
     virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
     virtual bool isZExtFree(EVT VT1, EVT VT2) const;
+    virtual bool isZExtFree(SDValue Val, EVT VT2) const;
 
     /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
     /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to