[SelectionDAG] Use KnownBits struct in DAG's computeKnownBits and simplifyDemandedBits
This patch replaces the separate APInts for KnownZero/KnownOne with a single KnownBits struct. This is similar to what was recently done for ValueTracking's version.
This is largely a mechanical transformation from KnownZero to Known.Zero.
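For reference, a minimal sketch of the struct being adopted (simplified from llvm/Support/KnownBits.h as it exists at this revision; only the members the call sites below rely on are shown):

    #include "llvm/ADT/APInt.h"
    #include <cassert>

    namespace llvm {

    struct KnownBits {
      APInt Zero; // Bits known to be zero.
      APInt One;  // Bits known to be one.

      // Default-constructed with (effectively) no width; assigned before use.
      KnownBits() = default;

      // Create a known-bits record of BitWidth bits with nothing known.
      KnownBits(unsigned BitWidth) : Zero(BitWidth, 0), One(BitWidth, 0) {}

      // Zero and One always carry the same width.
      unsigned getBitWidth() const {
        assert(Zero.getBitWidth() == One.getBitWidth() &&
               "Zero and One should have the same width!");
        return Zero.getBitWidth();
      }
    };

    } // end namespace llvm

The call sites below therefore change from passing two APInts by reference to passing one KnownBits by reference, e.g. CurDAG->computeKnownBits(Op, Known) followed by reads of Known.Zero and Known.One.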
Differential Revision: https://reviews.llvm.org/D32569
llvm-svn: 301620
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 3097145..b18fb30 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -20,6 +20,7 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -2078,18 +2079,18 @@
(void)BitWidth;
assert(BitWidth == 32 || BitWidth == 64);
- APInt KnownZero, KnownOne;
- CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
+ KnownBits Known;
+ CurDAG->computeKnownBits(Op, Known);
// Non-zero in the sense that they're not provably zero, which is the key
// point if we want to use this value
- uint64_t NonZeroBits = (~KnownZero).getZExtValue();
+ uint64_t NonZeroBits = (~Known.Zero).getZExtValue();
// Discard a constant AND mask if present. It's safe because the node will
// already have been factored into the computeKnownBits calculation above.
uint64_t AndImm;
if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
- assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
+ assert((~APInt(BitWidth, AndImm) & ~Known.Zero) == 0);
Op = Op.getOperand(0);
}
@@ -2158,15 +2159,15 @@
// Compute the Known Zero for the AND as this allows us to catch more general
// cases than just looking for AND with imm.
- APInt KnownZero, KnownOne;
- CurDAG->computeKnownBits(And, KnownZero, KnownOne);
+ KnownBits Known;
+ CurDAG->computeKnownBits(And, Known);
// Non-zero in the sense that they're not provably zero, which is the key
// point if we want to use this value.
- uint64_t NotKnownZero = (~KnownZero).getZExtValue();
+ uint64_t NotKnownZero = (~Known.Zero).getZExtValue();
// The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
- if (!isShiftedMask(KnownZero.getZExtValue(), VT))
+ if (!isShiftedMask(Known.Zero.getZExtValue(), VT))
return false;
// The bits being inserted must only set those bits that are known to be zero.
@@ -2300,15 +2301,15 @@
// This allows us to catch a more general case than just looking for
// AND with imm. Indeed, simplify-demanded-bits may have removed
// the AND instruction because it proves it was useless.
- APInt KnownZero, KnownOne;
- CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
+ KnownBits Known;
+ CurDAG->computeKnownBits(OrOpd1Val, Known);
// Check if there is enough room for the second operand to appear
// in the first one
APInt BitsToBeInserted =
- APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
+ APInt::getBitsSet(Known.getBitWidth(), DstLSB, DstLSB + Width);
- if ((BitsToBeInserted & ~KnownZero) != 0)
+ if ((BitsToBeInserted & ~Known.Zero) != 0)
continue;
// Set the first operand
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index a7c98fb..66c85c2 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -67,6 +67,7 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetCallingConv.h"
@@ -929,20 +930,19 @@
}
/// computeKnownBitsForTargetNode - Determine which of the bits specified in
-/// Mask are known to be either zero or one and return them in the
-/// KnownZero/KnownOne bitsets.
+/// Mask are known to be either zero or one and return them in Known.
void AArch64TargetLowering::computeKnownBitsForTargetNode(
- const SDValue Op, APInt &KnownZero, APInt &KnownOne,
+ const SDValue Op, KnownBits &Known,
const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
switch (Op.getOpcode()) {
default:
break;
case AArch64ISD::CSEL: {
- APInt KnownZero2, KnownOne2;
- DAG.computeKnownBits(Op->getOperand(0), KnownZero, KnownOne, Depth + 1);
- DAG.computeKnownBits(Op->getOperand(1), KnownZero2, KnownOne2, Depth + 1);
- KnownZero &= KnownZero2;
- KnownOne &= KnownOne2;
+ KnownBits Known2;
+ DAG.computeKnownBits(Op->getOperand(0), Known, Depth + 1);
+ DAG.computeKnownBits(Op->getOperand(1), Known2, Depth + 1);
+ Known.Zero &= Known2.Zero;
+ Known.One &= Known2.One;
break;
}
case ISD::INTRINSIC_W_CHAIN: {
@@ -952,10 +952,10 @@
default: return;
case Intrinsic::aarch64_ldaxr:
case Intrinsic::aarch64_ldxr: {
- unsigned BitWidth = KnownOne.getBitWidth();
+ unsigned BitWidth = Known.getBitWidth();
EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
unsigned MemBits = VT.getScalarSizeInBits();
- KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
+ Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
return;
}
}
@@ -974,15 +974,15 @@
// bits larger than the element datatype. 32-bit or larger doesn't need
// this as those are legal types and will be handled by isel directly.
MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
- unsigned BitWidth = KnownZero.getBitWidth();
+ unsigned BitWidth = Known.getBitWidth();
if (VT == MVT::v8i8 || VT == MVT::v16i8) {
assert(BitWidth >= 8 && "Unexpected width!");
APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
- KnownZero |= Mask;
+ Known.Zero |= Mask;
} else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
assert(BitWidth >= 16 && "Unexpected width!");
APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
- KnownZero |= Mask;
+ Known.Zero |= Mask;
}
break;
} break;
@@ -9461,11 +9461,11 @@
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) {
APInt DemandedMask = APInt::getLowBitsSet(64, 56);
- APInt KnownZero, KnownOne;
+ KnownBits Known;
TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
DCI.isBeforeLegalizeOps());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (TLI.SimplifyDemandedBits(Addr, DemandedMask, KnownZero, KnownOne, TLO)) {
+ if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) {
DCI.CommitTargetLoweringOpt(TLO);
return true;
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 6081b07..89db566 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -250,8 +250,8 @@
/// Determine which of the bits specified in Mask are known to be either zero
/// or one and return them in the KnownZero/KnownOne bitsets.
- void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
- APInt &KnownOne, const APInt &DemandedElts,
+ void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
+ const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index e21775e..29ded85 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -29,6 +29,7 @@
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/Support/KnownBits.h"
#include "SIInstrInfo.h"
using namespace llvm;
@@ -2293,11 +2294,11 @@
//===----------------------------------------------------------------------===//
static bool isU24(SDValue Op, SelectionDAG &DAG) {
- APInt KnownZero, KnownOne;
+ KnownBits Known;
EVT VT = Op.getValueType();
- DAG.computeKnownBits(Op, KnownZero, KnownOne);
+ DAG.computeKnownBits(Op, Known);
- return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
+ return (VT.getSizeInBits() - Known.Zero.countLeadingOnes()) <= 24;
}
static bool isI24(SDValue Op, SelectionDAG &DAG) {
@@ -3358,13 +3359,12 @@
OffsetVal,
OffsetVal + WidthVal);
- APInt KnownZero, KnownOne;
+ KnownBits Known;
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
- TLI.SimplifyDemandedBits(BitsFrom, Demanded,
- KnownZero, KnownOne, TLO)) {
+ TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
DCI.CommitTargetLoweringOpt(TLO);
}
}
@@ -3574,14 +3574,12 @@
}
void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
- const SDValue Op, APInt &KnownZero, APInt &KnownOne,
+ const SDValue Op, KnownBits &Known,
const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
- unsigned BitWidth = KnownZero.getBitWidth();
- KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
+ Known.Zero.clearAllBits(); Known.One.clearAllBits(); // Don't know anything.
- APInt KnownZero2;
- APInt KnownOne2;
+ KnownBits Known2;
unsigned Opc = Op.getOpcode();
switch (Opc) {
@@ -3589,7 +3587,7 @@
break;
case AMDGPUISD::CARRY:
case AMDGPUISD::BORROW: {
- KnownZero = APInt::getHighBitsSet(32, 31);
+ Known.Zero = APInt::getHighBitsSet(32, 31);
break;
}
@@ -3602,16 +3600,16 @@
uint32_t Width = CWidth->getZExtValue() & 0x1f;
if (Opc == AMDGPUISD::BFE_U32)
- KnownZero = APInt::getHighBitsSet(32, 32 - Width);
+ Known.Zero = APInt::getHighBitsSet(32, 32 - Width);
break;
}
case AMDGPUISD::FP_TO_FP16:
case AMDGPUISD::FP16_ZEXT: {
- unsigned BitWidth = KnownZero.getBitWidth();
+ unsigned BitWidth = Known.getBitWidth();
// High bits are zero.
- KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
+ Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
break;
}
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 13cbfe2..de05019 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -199,8 +199,7 @@
/// either zero or one and return them in the \p KnownZero and \p KnownOne
/// bitsets.
void computeKnownBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ce74a7c..41043804b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -68,6 +68,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetOptions.h"
@@ -4706,12 +4707,12 @@
APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
- APInt KnownZero, KnownOne;
+ KnownBits Known;
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
- TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) {
+ TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
DCI.CommitTargetLoweringOpt(TLO);
}
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 382f881..9f7e60a 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -91,6 +91,7 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -11758,9 +11759,9 @@
// Lastly, can we determine that the bits defined by OrCI
// are zero in Y?
- APInt KnownZero, KnownOne;
- DAG.computeKnownBits(Y, KnownZero, KnownOne);
- if ((OrCI & KnownZero) != OrCI)
+ KnownBits Known;
+ DAG.computeKnownBits(Y, Known);
+ if ((OrCI & Known.Zero) != OrCI)
return SDValue();
// OK, we can do the combine.
@@ -11898,16 +11899,16 @@
}
if (Res.getNode()) {
- APInt KnownZero, KnownOne;
- DAG.computeKnownBits(SDValue(N,0), KnownZero, KnownOne);
+ KnownBits Known;
+ DAG.computeKnownBits(SDValue(N,0), Known);
// Capture demanded bits information that would be otherwise lost.
- if (KnownZero == 0xfffffffe)
+ if (Known.Zero == 0xfffffffe)
Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
DAG.getValueType(MVT::i1));
- else if (KnownZero == 0xffffff00)
+ else if (Known.Zero == 0xffffff00)
Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
DAG.getValueType(MVT::i8));
- else if (KnownZero == 0xffff0000)
+ else if (Known.Zero == 0xffff0000)
Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
DAG.getValueType(MVT::i16));
}
@@ -12596,13 +12597,12 @@
}
void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- unsigned BitWidth = KnownOne.getBitWidth();
- KnownZero = KnownOne = APInt(BitWidth, 0);
+ unsigned BitWidth = Known.getBitWidth();
+ Known.Zero.clearAllBits(); Known.One.clearAllBits();
switch (Op.getOpcode()) {
default: break;
case ARMISD::ADDC:
@@ -12612,17 +12612,17 @@
// These nodes' second result is a boolean
if (Op.getResNo() == 0)
break;
- KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
+ Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
break;
case ARMISD::CMOV: {
// Bits are known zero/one if known on the LHS and RHS.
- DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
- if (KnownZero == 0 && KnownOne == 0) return;
+ DAG.computeKnownBits(Op.getOperand(0), Known, Depth+1);
+ if (Known.Zero == 0 && Known.One == 0) return;
- APInt KnownZeroRHS, KnownOneRHS;
- DAG.computeKnownBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
- KnownZero &= KnownZeroRHS;
- KnownOne &= KnownOneRHS;
+ KnownBits KnownRHS;
+ DAG.computeKnownBits(Op.getOperand(1), KnownRHS, Depth+1);
+ Known.Zero &= KnownRHS.Zero;
+ Known.One &= KnownRHS.One;
return;
}
case ISD::INTRINSIC_W_CHAIN: {
@@ -12634,7 +12634,7 @@
case Intrinsic::arm_ldrex: {
EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
unsigned MemBits = VT.getScalarSizeInBits();
- KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
+ Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
return;
}
}
@@ -12642,14 +12642,14 @@
case ARMISD::BFI: {
// Conservatively, we can recurse down the first operand
// and just mask out all affected bits.
- DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1);
+ DAG.computeKnownBits(Op.getOperand(0), Known, Depth + 1);
// The operand to BFI is already a mask suitable for removing the bits it
// sets.
ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
const APInt &Mask = CI->getAPIntValue();
- KnownZero &= Mask;
- KnownOne &= Mask;
+ Known.Zero &= Mask;
+ Known.One &= Mask;
return;
}
}
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 8b54ce4..76e4b60 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -350,8 +350,7 @@
SDValue &Offset, ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const override;
- void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
- APInt &KnownOne,
+ void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const override;
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 125c002..1b0402b 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -49,6 +49,7 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -542,12 +543,12 @@
SDValue Op1 = N->getOperand(1);
SDLoc dl(N);
- APInt LKZ, LKO, RKZ, RKO;
- CurDAG->computeKnownBits(Op0, LKZ, LKO);
- CurDAG->computeKnownBits(Op1, RKZ, RKO);
+ KnownBits LKnown, RKnown;
+ CurDAG->computeKnownBits(Op0, LKnown);
+ CurDAG->computeKnownBits(Op1, RKnown);
- unsigned TargetMask = LKZ.getZExtValue();
- unsigned InsertMask = RKZ.getZExtValue();
+ unsigned TargetMask = LKnown.Zero.getZExtValue();
+ unsigned InsertMask = RKnown.Zero.getZExtValue();
if ((TargetMask | InsertMask) == 0xFFFFFFFF) {
unsigned Op0Opc = Op0.getOpcode();
@@ -590,9 +591,9 @@
// The AND mask might not be a constant, and we need to make sure that
// if we're going to fold the masking with the insert, all bits not
// known to be zero in the mask are known to be one.
- APInt MKZ, MKO;
- CurDAG->computeKnownBits(Op1.getOperand(1), MKZ, MKO);
- bool CanFoldMask = InsertMask == MKO.getZExtValue();
+ KnownBits MKnown;
+ CurDAG->computeKnownBits(Op1.getOperand(1), MKnown);
+ bool CanFoldMask = InsertMask == MKnown.One.getZExtValue();
unsigned SHOpc = Op1.getOperand(0).getOpcode();
if ((SHOpc == ISD::SHL || SHOpc == ISD::SRL) && CanFoldMask &&
@@ -2772,12 +2773,12 @@
short Imm;
if (N->getOperand(0)->getOpcode() == ISD::FrameIndex &&
isIntS16Immediate(N->getOperand(1), Imm)) {
- APInt LHSKnownZero, LHSKnownOne;
- CurDAG->computeKnownBits(N->getOperand(0), LHSKnownZero, LHSKnownOne);
+ KnownBits LHSKnown;
+ CurDAG->computeKnownBits(N->getOperand(0), LHSKnown);
// If this is equivalent to an add, then we can fold it with the
// FrameIndex calculation.
- if ((LHSKnownZero.getZExtValue()|~(uint64_t)Imm) == ~0ULL) {
+ if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)Imm) == ~0ULL) {
selectFrameIndex(N, N->getOperand(0).getNode(), (int)Imm);
return;
}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 4659a2e..8dadce1 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -79,6 +79,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -1847,17 +1848,14 @@
// If this is an or of disjoint bitfields, we can codegen this as an add
// (for better address arithmetic) if the LHS and RHS of the OR are provably
// disjoint.
- APInt LHSKnownZero, LHSKnownOne;
- APInt RHSKnownZero, RHSKnownOne;
- DAG.computeKnownBits(N.getOperand(0),
- LHSKnownZero, LHSKnownOne);
+ KnownBits LHSKnown, RHSKnown;
+ DAG.computeKnownBits(N.getOperand(0), LHSKnown);
- if (LHSKnownZero.getBoolValue()) {
- DAG.computeKnownBits(N.getOperand(1),
- RHSKnownZero, RHSKnownOne);
+ if (LHSKnown.Zero.getBoolValue()) {
+ DAG.computeKnownBits(N.getOperand(1), RHSKnown);
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
- if (~(LHSKnownZero | RHSKnownZero) == 0) {
+ if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
Base = N.getOperand(0);
Index = N.getOperand(1);
return true;
@@ -1953,10 +1951,10 @@
// If this is an or of disjoint bitfields, we can codegen this as an add
// (for better address arithmetic) if the LHS and RHS of the OR are
// provably disjoint.
- APInt LHSKnownZero, LHSKnownOne;
- DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
+ KnownBits LHSKnown;
+ DAG.computeKnownBits(N.getOperand(0), LHSKnown);
- if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
+ if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
if (FrameIndexSDNode *FI =
@@ -10318,17 +10316,16 @@
} else {
// This is neither a signed nor an unsigned comparison, just make sure
// that the high bits are equal.
- APInt Op1Zero, Op1One;
- APInt Op2Zero, Op2One;
- DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One);
- DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One);
+ KnownBits Op1Known, Op2Known;
+ DAG.computeKnownBits(N->getOperand(0), Op1Known);
+ DAG.computeKnownBits(N->getOperand(1), Op2Known);
// We don't really care about what is known about the first bit (if
// anything), so clear it in all masks prior to comparing them.
- Op1Zero.clearBit(0); Op1One.clearBit(0);
- Op2Zero.clearBit(0); Op2One.clearBit(0);
+ Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
+ Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
- if (Op1Zero != Op2Zero || Op1One != Op2One)
+ if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
return SDValue();
}
}
@@ -12015,18 +12012,17 @@
//===----------------------------------------------------------------------===//
void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
+ Known.Zero.clearAllBits(); Known.One.clearAllBits();
switch (Op.getOpcode()) {
default: break;
case PPCISD::LBRX: {
// lhbrx is known to have the top bits cleared out.
if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
- KnownZero = 0xFFFF0000;
+ Known.Zero = 0xFFFF0000;
break;
}
case ISD::INTRINSIC_WO_CHAIN: {
@@ -12048,7 +12044,7 @@
case Intrinsic::ppc_altivec_vcmpgtuh_p:
case Intrinsic::ppc_altivec_vcmpgtuw_p:
case Intrinsic::ppc_altivec_vcmpgtud_p:
- KnownZero = ~1U; // All bits but the low one are known to be zero.
+ Known.Zero = ~1U; // All bits but the low one are known to be zero.
break;
}
}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 6113eb5..5645fdc 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -606,8 +606,7 @@
SelectionDAG &DAG) const override;
void computeKnownBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index f120a98..c44e371 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -30,6 +30,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
using namespace llvm;
@@ -1875,25 +1876,24 @@
/// combiner.
void SparcTargetLowering::computeKnownBitsForTargetNode
(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- APInt KnownZero2, KnownOne2;
- KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
+ KnownBits Known2;
+ Known.Zero.clearAllBits(); Known.One.clearAllBits();
switch (Op.getOpcode()) {
default: break;
case SPISD::SELECT_ICC:
case SPISD::SELECT_XCC:
case SPISD::SELECT_FCC:
- DAG.computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
- DAG.computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(1), Known, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(0), Known2, Depth+1);
// Only known if known in both the LHS and RHS.
- KnownOne &= KnownOne2;
- KnownZero &= KnownZero2;
+ Known.One &= Known2.One;
+ Known.Zero &= Known2.Zero;
break;
}
}
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index 90d0398..cc6386b 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -66,8 +66,7 @@
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void computeKnownBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 920b6e4..cd2f708 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -15,6 +15,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -711,9 +712,9 @@
// The inner check covers all cases but is more expensive.
uint64_t Used = allOnes(Op.getValueSizeInBits());
if (Used != (AndMask | InsertMask)) {
- APInt KnownZero, KnownOne;
- CurDAG->computeKnownBits(Op.getOperand(0), KnownZero, KnownOne);
- if (Used != (AndMask | InsertMask | KnownZero.getZExtValue()))
+ KnownBits Known;
+ CurDAG->computeKnownBits(Op.getOperand(0), Known);
+ if (Used != (AndMask | InsertMask | Known.Zero.getZExtValue()))
return false;
}
@@ -770,9 +771,9 @@
// If some bits of Input are already known zeros, those bits will have
// been removed from the mask. See if adding them back in makes the
// mask suitable.
- APInt KnownZero, KnownOne;
- CurDAG->computeKnownBits(Input, KnownZero, KnownOne);
- Mask |= KnownZero.getZExtValue();
+ KnownBits Known;
+ CurDAG->computeKnownBits(Input, Known);
+ Mask |= Known.Zero.getZExtValue();
if (!refineRxSBGMask(RxSBG, Mask))
return false;
}
@@ -794,9 +795,9 @@
// If some bits of Input are already known ones, those bits will have
// been removed from the mask. See if adding them back in makes the
// mask suitable.
- APInt KnownZero, KnownOne;
- CurDAG->computeKnownBits(Input, KnownZero, KnownOne);
- Mask &= ~KnownOne.getZExtValue();
+ KnownBits Known;
+ CurDAG->computeKnownBits(Input, Known);
+ Mask &= ~Known.One.getZExtValue();
if (!refineRxSBGMask(RxSBG, Mask))
return false;
}
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index f2fd581..6989aab 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -20,8 +20,9 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/KnownBits.h"
#include <cctype>
using namespace llvm;
@@ -3066,14 +3067,14 @@
// Get the known-zero masks for each operand.
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
- APInt KnownZero[2], KnownOne[2];
- DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]);
- DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]);
+ KnownBits Known[2];
+ DAG.computeKnownBits(Ops[0], Known[0]);
+ DAG.computeKnownBits(Ops[1], Known[1]);
// See if the upper 32 bits of one operand and the lower 32 bits of the
// other are known zero. They are the low and high operands respectively.
- uint64_t Masks[] = { KnownZero[0].getZExtValue(),
- KnownZero[1].getZExtValue() };
+ uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
+ Known[1].Zero.getZExtValue() };
unsigned High, Low;
if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
High = 1, Low = 0;
@@ -3158,9 +3159,9 @@
}
// Get the known-zero mask for the operand.
- APInt KnownZero, KnownOne;
- DAG.computeKnownBits(Op, KnownZero, KnownOne);
- unsigned NumSignificantBits = (~KnownZero).getActiveBits();
+ KnownBits Known;
+ DAG.computeKnownBits(Op, Known);
+ unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
if (NumSignificantBits == 0)
return DAG.getConstant(0, DL, VT);
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 2d788bf..b0c920f 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -31,6 +31,7 @@
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
@@ -1070,9 +1071,9 @@
}
APInt MaskedHighBits =
APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
- APInt KnownZero, KnownOne;
- DAG.computeKnownBits(X, KnownZero, KnownOne);
- if (MaskedHighBits != KnownZero) return true;
+ KnownBits Known;
+ DAG.computeKnownBits(X, Known);
+ if (MaskedHighBits != Known.Zero) return true;
// We've identified a pattern that can be transformed into a single shift
// and an addressing mode. Make it so.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 067ad89..dbf0990 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -52,6 +52,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
@@ -16799,9 +16800,9 @@
unsigned BitWidth = Op0.getValueSizeInBits();
unsigned AndBitWidth = And.getValueSizeInBits();
if (BitWidth > AndBitWidth) {
- APInt Zeros, Ones;
- DAG.computeKnownBits(Op0, Zeros, Ones);
- if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
+ KnownBits Known;
+ DAG.computeKnownBits(Op0, Known);
+ if (Known.Zero.countLeadingOnes() < BitWidth - AndBitWidth)
return SDValue();
}
LHS = Op1;
@@ -26667,12 +26668,11 @@
//===----------------------------------------------------------------------===//
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- unsigned BitWidth = KnownZero.getBitWidth();
+ unsigned BitWidth = Known.getBitWidth();
unsigned Opc = Op.getOpcode();
EVT VT = Op.getValueType();
assert((Opc >= ISD::BUILTIN_OP_END ||
@@ -26682,7 +26682,7 @@
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
+ Known = KnownBits(BitWidth); // Don't know anything.
switch (Opc) {
default: break;
case X86ISD::ADD:
@@ -26701,33 +26701,33 @@
break;
LLVM_FALLTHROUGH;
case X86ISD::SETCC:
- KnownZero.setBits(1, BitWidth);
+ Known.Zero.setBits(1, BitWidth);
break;
case X86ISD::MOVMSK: {
unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
- KnownZero.setBits(NumLoBits, BitWidth);
+ Known.Zero.setBits(NumLoBits, BitWidth);
break;
}
case X86ISD::VSHLI:
case X86ISD::VSRLI: {
if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
- KnownZero.setAllBits();
+ Known.Zero.setAllBits();
break;
}
- DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1);
+ DAG.computeKnownBits(Op.getOperand(0), Known, Depth + 1);
unsigned ShAmt = ShiftImm->getZExtValue();
if (Opc == X86ISD::VSHLI) {
- KnownZero <<= ShAmt;
- KnownOne <<= ShAmt;
+ Known.Zero <<= ShAmt;
+ Known.One <<= ShAmt;
// Low bits are known zero.
- KnownZero.setLowBits(ShAmt);
+ Known.Zero.setLowBits(ShAmt);
} else {
- KnownZero.lshrInPlace(ShAmt);
- KnownOne.lshrInPlace(ShAmt);
+ Known.Zero.lshrInPlace(ShAmt);
+ Known.One.lshrInPlace(ShAmt);
// High bits are known zero.
- KnownZero.setHighBits(ShAmt);
+ Known.Zero.setHighBits(ShAmt);
}
}
break;
@@ -26741,12 +26741,12 @@
unsigned InBitWidth = SrcVT.getScalarSizeInBits();
assert(InNumElts >= NumElts && "Illegal VZEXT input");
- KnownZero = KnownOne = APInt(InBitWidth, 0);
+ Known = KnownBits(InBitWidth);
APInt DemandedSrcElts = APInt::getLowBitsSet(InNumElts, NumElts);
- DAG.computeKnownBits(N0, KnownZero, KnownOne, DemandedSrcElts, Depth + 1);
- KnownOne = KnownOne.zext(BitWidth);
- KnownZero = KnownZero.zext(BitWidth);
- KnownZero.setBits(InBitWidth, BitWidth);
+ DAG.computeKnownBits(N0, Known, DemandedSrcElts, Depth + 1);
+ Known.One = Known.One.zext(BitWidth);
+ Known.Zero = Known.Zero.zext(BitWidth);
+ Known.Zero.setBits(InBitWidth, BitWidth);
break;
}
}
@@ -30206,12 +30206,11 @@
assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
APInt DemandedMask(APInt::getSignMask(BitWidth));
- APInt KnownZero, KnownOne;
+ KnownBits Known;
TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
DCI.isBeforeLegalizeOps());
if (TLI.ShrinkDemandedConstant(Cond, DemandedMask, TLO) ||
- TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
- TLO)) {
+ TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO)) {
// If we changed the computation somewhere in the DAG, this change will
// affect all users of Cond. Make sure it is fine and update all the nodes
// so that we do not use the generic VSELECT anymore. Otherwise, we may
@@ -33774,12 +33773,12 @@
if (Op1.hasOneUse()) {
unsigned BitWidth = Op1.getValueSizeInBits();
APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
- APInt KnownZero, KnownOne;
+ KnownBits Known;
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.ShrinkDemandedConstant(Op1, DemandedMask, TLO) ||
- TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
+ TLI.SimplifyDemandedBits(Op1, DemandedMask, Known, TLO))
DCI.CommitTargetLoweringOpt(TLO);
}
return SDValue();
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 190a883..ec6bb90 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -828,8 +828,7 @@
/// Determine which of the bits specified in Mask are known to be either
/// zero or one and return them in the KnownZero/KnownOne bitsets.
void computeKnownBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index e592c2b..3dc673e 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1271,11 +1271,11 @@
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
- APInt KnownZero0, KnownOne0;
- CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
- APInt KnownZero1, KnownOne1;
- CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
- return (~KnownZero0 & ~KnownZero1) == 0;
+ KnownBits Known0;
+ CurDAG->computeKnownBits(N->getOperand(0), Known0, 0);
+ KnownBits Known1;
+ CurDAG->computeKnownBits(N->getOperand(1), Known1, 0);
+ return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 2efcd46..4d3ecf2 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -34,6 +34,7 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -406,9 +407,9 @@
static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
- APInt KnownZero, KnownOne;
- DAG.computeKnownBits(Value, KnownZero, KnownOne);
- return KnownZero.countTrailingOnes() >= 2;
+ KnownBits Known;
+ DAG.computeKnownBits(Value, Known);
+ return Known.Zero.countTrailingOnes() >= 2;
}
SDValue XCoreTargetLowering::
@@ -1601,13 +1602,12 @@
if (OutVal.hasOneUse()) {
unsigned BitWidth = OutVal.getValueSizeInBits();
APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
- APInt KnownZero, KnownOne;
+ KnownBits Known;
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
- TLI.SimplifyDemandedBits(OutVal, DemandedMask, KnownZero, KnownOne,
- TLO))
+ TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
DCI.CommitTargetLoweringOpt(TLO);
}
break;
@@ -1618,13 +1618,12 @@
if (Time.hasOneUse()) {
unsigned BitWidth = Time.getValueSizeInBits();
APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
- APInt KnownZero, KnownOne;
+ KnownBits Known;
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
- TLI.SimplifyDemandedBits(Time, DemandedMask, KnownZero, KnownOne,
- TLO))
+ TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
DCI.CommitTargetLoweringOpt(TLO);
}
break;
@@ -1655,11 +1654,11 @@
// fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
// low bit set
if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
- APInt KnownZero, KnownOne;
+ KnownBits Known;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.computeKnownBits(N2, KnownZero, KnownOne);
- if ((KnownZero & Mask) == Mask) {
+ DAG.computeKnownBits(N2, Known);
+ if ((Known.Zero & Mask) == Mask) {
SDValue Carry = DAG.getConstant(0, dl, VT);
SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
SDValue Ops[] = { Result, Carry };
@@ -1678,11 +1677,11 @@
// fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
- APInt KnownZero, KnownOne;
+ KnownBits Known;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.computeKnownBits(N2, KnownZero, KnownOne);
- if ((KnownZero & Mask) == Mask) {
+ DAG.computeKnownBits(N2, Known);
+ if ((Known.Zero & Mask) == Mask) {
SDValue Borrow = N2;
SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
DAG.getConstant(0, dl, VT), N2);
@@ -1694,11 +1693,11 @@
// fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
// low bit set
if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
- APInt KnownZero, KnownOne;
+ KnownBits Known;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.computeKnownBits(N2, KnownZero, KnownOne);
- if ((KnownZero & Mask) == Mask) {
+ DAG.computeKnownBits(N2, Known);
+ if ((Known.Zero & Mask) == Mask) {
SDValue Borrow = DAG.getConstant(0, dl, VT);
SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
SDValue Ops[] = { Result, Borrow };
@@ -1822,20 +1821,19 @@
}
void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
+ Known.Zero.clearAllBits(); Known.One.clearAllBits();
switch (Op.getOpcode()) {
default: break;
case XCoreISD::LADD:
case XCoreISD::LSUB:
if (Op.getResNo() == 1) {
// Top bits of carry / borrow are clear.
- KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
- KnownZero.getBitWidth() - 1);
+ Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
+ Known.getBitWidth() - 1);
}
break;
case ISD::INTRINSIC_W_CHAIN:
@@ -1844,24 +1842,24 @@
switch (IntNo) {
case Intrinsic::xcore_getts:
// High bits are known to be zero.
- KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
- KnownZero.getBitWidth() - 16);
+ Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
+ Known.getBitWidth() - 16);
break;
case Intrinsic::xcore_int:
case Intrinsic::xcore_inct:
// High bits are known to be zero.
- KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
- KnownZero.getBitWidth() - 8);
+ Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
+ Known.getBitWidth() - 8);
break;
case Intrinsic::xcore_testct:
// Result is either 0 or 1.
- KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
- KnownZero.getBitWidth() - 1);
+ Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
+ Known.getBitWidth() - 1);
break;
case Intrinsic::xcore_testwct:
// Result is in the range 0 - 4.
- KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
- KnownZero.getBitWidth() - 3);
+ Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
+ Known.getBitWidth() - 3);
break;
}
}
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.h b/llvm/lib/Target/XCore/XCoreISelLowering.h
index 188f4f1..452d5b0 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.h
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.h
@@ -200,8 +200,7 @@
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
void computeKnownBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth = 0) const override;