Rework the SelectionDAG-based implementations of SimplifyDemandedBits
and ComputeMaskedBits to match the new improved versions in instcombine.
Tested against all of MultiSource/Benchmarks on PPC.
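
For reference, a minimal standalone sketch of the KnownZero/KnownOne contract
the new computeMaskedBitsForTargetNode hook follows (plain uint64_t bitsets
and a small driver; the KnownBits struct and knownBitsOfSelect helper are
illustrative names for this sketch only, not LLVM API):

  // Standalone model of the KnownZero/KnownOne bookkeeping used by the new
  // hook.  Illustrative only: KnownBits and knownBitsOfSelect are not LLVM
  // names.  A bit set in Zero is known to be 0, a bit set in One is known
  // to be 1, and a bit set in neither is unknown.
  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  struct KnownBits {
    uint64_t Zero;
    uint64_t One;
  };

  // A select produces one of its two operands, so a bit is known only if it
  // is known, with the same value, in both operands -- the same intersection
  // the SPISD::SELECT_ICC/SELECT_FCC handler in the Sparc change performs.
  KnownBits knownBitsOfSelect(KnownBits TVal, KnownBits FVal, uint64_t Mask) {
    assert((TVal.Zero & TVal.One) == 0 && "Bits known to be one AND zero?");
    assert((FVal.Zero & FVal.One) == 0 && "Bits known to be one AND zero?");
    KnownBits Result;
    Result.Zero = TVal.Zero & FVal.Zero & Mask;
    Result.One  = TVal.One  & FVal.One  & Mask;
    return Result;
  }

  int main() {
    // True value is the constant 0x0F, false value is the constant 0x05.
    KnownBits TVal = { ~0x0FULL, 0x0FULL };
    KnownBits FVal = { ~0x05ULL, 0x05ULL };
    KnownBits K = knownBitsOfSelect(TVal, FVal, 0xFFULL);
    // Bits 1 and 3 differ between the operands, so they are unknown; bits 0
    // and 2 are known one and bits 4-7 are known zero.  Prints
    // KnownZero=0xf0 KnownOne=0x5.
    std::printf("KnownZero=0x%llx KnownOne=0x%llx\n",
                (unsigned long long)K.Zero, (unsigned long long)K.One);
    return 0;
  }

Returning both bitsets lets callers learn known-one bits as well as known-zero
bits, which the old boolean isMaskedValueZeroForTargetNode predicate could not
express.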


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@26238 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index 8671b60..d80c006 100644
--- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -98,11 +98,14 @@
     SparcTargetLowering(TargetMachine &TM);
     virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
     
-    /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
-    /// be zero. Op is expected to be a target specific node. Used by DAG
-    /// combiner.
-    virtual bool isMaskedValueZeroForTargetNode(const SDOperand &Op,
-                                                uint64_t Mask) const;
+    /// computeMaskedBitsForTargetNode - Determine which of the bits specified 
+    /// in Mask are known to be either zero or one and return them in the 
+    /// KnownZero/KnownOne bitsets.
+    virtual void computeMaskedBitsForTargetNode(const SDOperand Op,
+                                                uint64_t Mask,
+                                                uint64_t &KnownZero, 
+                                                uint64_t &KnownOne,
+                                                unsigned Depth = 0) const;
     
     virtual std::vector<SDOperand>
       LowerArguments(Function &F, SelectionDAG &DAG);
@@ -246,20 +249,30 @@
-/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
-/// be zero. Op is expected to be a target specific node. Used by DAG
-/// combiner.
-bool SparcTargetLowering::
-isMaskedValueZeroForTargetNode(const SDOperand &Op, uint64_t Mask) const {
+/// computeMaskedBitsForTargetNode - Determine which of the bits specified
+/// in Mask are known to be either zero or one and return them in the
+/// KnownZero/KnownOne bitsets.
+void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
+                                                         uint64_t Mask,
+                                                         uint64_t &KnownZero, 
+                                                         uint64_t &KnownOne,
+                                                         unsigned Depth) const {
+  uint64_t KnownZero2, KnownOne2;
+  KnownZero = KnownOne = 0;   // Don't know anything.
+  
   switch (Op.getOpcode()) {
-  default: return false; 
+  default: break;
   case SPISD::SELECT_ICC:
   case SPISD::SELECT_FCC:
-    assert(MVT::isInteger(Op.getValueType()) && "Not an integer select!");
-    // These operations are masked zero if both the left and the right are zero.
-    return MaskedValueIsZero(Op.getOperand(0), Mask) &&
-           MaskedValueIsZero(Op.getOperand(1), Mask);
+    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
+    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 
+    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 
+    
+    // Only known if known in both the LHS and RHS.
+    KnownOne &= KnownOne2;
+    KnownZero &= KnownZero2;
+    break;
   }
 }
 
-
 /// LowerArguments - V8 uses a very simple ABI, where all values are passed in
 /// either one or two GPRs, including FP values.  TODO: we should pass FP values
 /// in FP registers for fastcc functions.
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index adaa986..3e0a210 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -2035,19 +2035,23 @@
   }
 }
 
-bool X86TargetLowering::isMaskedValueZeroForTargetNode(const SDOperand &Op,
-                                                       uint64_t Mask) const {
+void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
+                                                       uint64_t Mask,
+                                                       uint64_t &KnownZero, 
+                                                       uint64_t &KnownOne,
+                                                       unsigned Depth) const {
 
   unsigned Opc = Op.getOpcode();
+  KnownZero = KnownOne = 0;   // Don't know anything.
 
   switch (Opc) {
   default:
     assert(Opc >= ISD::BUILTIN_OP_END && "Expected a target specific node");
     break;
-  case X86ISD::SETCC: return (Mask & 1) == 0;
+  case X86ISD::SETCC: 
+    KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
+    break;
   }
-
-  return false;
 }
 
 std::vector<unsigned> X86TargetLowering::
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 88d8e6c..dc1a13c 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -218,12 +218,15 @@
     /// DAG node.
     virtual const char *getTargetNodeName(unsigned Opcode) const;
 
-    /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
-    /// be zero. Op is expected to be a target specific node. Used by DAG
-    /// combiner.
-    virtual bool isMaskedValueZeroForTargetNode(const SDOperand &Op,
-                                                uint64_t Mask) const;
-
+    /// computeMaskedBitsForTargetNode - Determine which of the bits specified 
+    /// in Mask are known to be either zero or one and return them in the 
+    /// KnownZero/KnownOne bitsets.
+    virtual void computeMaskedBitsForTargetNode(const SDOperand Op,
+                                                uint64_t Mask,
+                                                uint64_t &KnownZero, 
+                                                uint64_t &KnownOne,
+                                                unsigned Depth = 0) const;
+    
     SDOperand getReturnAddressFrameIndex(SelectionDAG &DAG);
 
     std::vector<unsigned>