Remove the AssumptionCache
After r289755, the AssumptionCache is no longer needed. Variables affected by
assumptions are now found by using the new operand-bundle-based scheme. This
new scheme is more computationally efficient, and it also requires much less
code...
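
For callers the change is mechanical: the AssumptionCache* parameter simply
disappears from the ValueTracking entry points. The sketch below shows a
hypothetical caller before and after; the helper name and its body are
illustrative, not code from this patch, but the call matches the updated
signature in the diff.

    #include "llvm/ADT/APInt.h"
    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Returns true if the low bit of V is known to be zero.
    static bool lowBitKnownZero(const Value *V, const DataLayout &DL,
                                const Instruction *CxtI,
                                const DominatorTree *DT) {
      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      // Before this patch the cache had to be threaded through:
      //   computeKnownBits(V, KnownZero, KnownOne, DL, /*Depth=*/0,
      //                    &AC, CxtI, DT);
      computeKnownBits(V, KnownZero, KnownOne, DL, /*Depth=*/0, CxtI, DT);
      return KnownZero[0];
    }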
llvm-svn: 289756
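
The discovery side that makes the cache unnecessary is, roughly, a walk over
the value's use list: under the r289755 scheme, every @llvm.assume that may
constrain a value names that value in one of its operand bundles, so no side
table has to be kept in sync with the IR. A minimal sketch of the idea (the
helper name is hypothetical; this is an illustration, not code from r289755):

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    // Hand every @llvm.assume whose operand bundles mention V to the
    // callback; walking V's uses replaces the per-function cache.
    static void forEachAffectingAssumption(
        const Value *V, function_ref<void(const IntrinsicInst &)> Callback) {
      for (const Use &U : V->uses())
        if (auto *II = dyn_cast<IntrinsicInst>(U.getUser()))
          if (II->getIntrinsicID() == Intrinsic::assume &&
              II->isBundleOperand(U.getOperandNo()))
            Callback(*II);
    }

Because the IR itself carries the edge from a value to the assumptions that
affect it, passes no longer need to construct, thread through, or invalidate
an analysis, which is where the code savings mentioned above come from.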
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index aa745eb..601fef8 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -15,7 +15,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/Loads.h"
@@ -73,7 +73,6 @@
// figuring out if we can use it.
struct Query {
const DataLayout &DL;
- AssumptionCache *AC;
const Instruction *CxtI;
const DominatorTree *DT;
@@ -89,12 +88,11 @@
std::array<const Value *, MaxDepth> Excluded;
unsigned NumExcluded;
- Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT)
- : DL(DL), AC(AC), CxtI(CxtI), DT(DT), NumExcluded(0) {}
+ Query(const DataLayout &DL, const Instruction *CxtI, const DominatorTree *DT)
+ : DL(DL), CxtI(CxtI), DT(DT), NumExcluded(0) {}
Query(const Query &Q, const Value *NewExcl)
- : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
+ : DL(Q.DL), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
Excluded = Q.Excluded;
Excluded[NumExcluded++] = NewExcl;
assert(NumExcluded <= Excluded.size());
@@ -130,15 +128,15 @@
void llvm::computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
const DataLayout &DL, unsigned Depth,
- AssumptionCache *AC, const Instruction *CxtI,
+ const Instruction *CxtI,
const DominatorTree *DT) {
::computeKnownBits(V, KnownZero, KnownOne, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT));
+ Query(DL, safeCxtI(V, CxtI), DT));
}
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
const DataLayout &DL,
- AssumptionCache *AC, const Instruction *CxtI,
+ const Instruction *CxtI,
const DominatorTree *DT) {
assert(LHS->getType() == RHS->getType() &&
"LHS and RHS should have the same type");
@@ -147,8 +145,8 @@
IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
- computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
- computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, CxtI, DT);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, CxtI, DT);
return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}
@@ -157,10 +155,10 @@
void llvm::ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
const DataLayout &DL, unsigned Depth,
- AssumptionCache *AC, const Instruction *CxtI,
+ const Instruction *CxtI,
const DominatorTree *DT) {
::ComputeSignBit(V, KnownZero, KnownOne, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT));
+ Query(DL, safeCxtI(V, CxtI), DT));
}
static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
@@ -168,58 +166,51 @@
bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
bool OrZero,
- unsigned Depth, AssumptionCache *AC,
- const Instruction *CxtI,
+ unsigned Depth, const Instruction *CxtI,
const DominatorTree *DT) {
return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT));
+ Query(DL, safeCxtI(V, CxtI), DT));
}
static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
- AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
- return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
+ const Instruction *CxtI, const DominatorTree *DT) {
+ return ::isKnownNonZero(V, Depth, Query(DL, safeCxtI(V, CxtI), DT));
}
bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
- unsigned Depth,
- AssumptionCache *AC, const Instruction *CxtI,
+ unsigned Depth, const Instruction *CxtI,
const DominatorTree *DT) {
bool NonNegative, Negative;
- ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
+ ComputeSignBit(V, NonNegative, Negative, DL, Depth, CxtI, DT);
return NonNegative;
}
bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
- AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
+ const Instruction *CxtI, const DominatorTree *DT) {
if (auto *CI = dyn_cast<ConstantInt>(V))
return CI->getValue().isStrictlyPositive();
// TODO: We're doing two recursive queries here. We should factor this such
// that only a single query is needed.
- return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
- isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
+ return isKnownNonNegative(V, DL, Depth, CxtI, DT) &&
+ isKnownNonZero(V, DL, Depth, CxtI, DT);
}
bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
- AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
+ const Instruction *CxtI, const DominatorTree *DT) {
bool NonNegative, Negative;
- ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
+ ComputeSignBit(V, NonNegative, Negative, DL, Depth, CxtI, DT);
return Negative;
}
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
- const DataLayout &DL,
- AssumptionCache *AC, const Instruction *CxtI,
+ const DataLayout &DL, const Instruction *CxtI,
const DominatorTree *DT) {
- return ::isKnownNonEqual(V1, V2, Query(DL, AC,
- safeCxtI(V1, safeCxtI(V2, CxtI)),
+ return ::isKnownNonEqual(V1, V2, Query(DL, safeCxtI(V1, safeCxtI(V2, CxtI)),
DT));
}
@@ -228,20 +219,19 @@
bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
const DataLayout &DL,
- unsigned Depth, AssumptionCache *AC,
- const Instruction *CxtI, const DominatorTree *DT) {
+ unsigned Depth, const Instruction *CxtI,
+ const DominatorTree *DT) {
return ::MaskedValueIsZero(V, Mask, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT));
+ Query(DL, safeCxtI(V, CxtI), DT));
}
static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
const Query &Q);
unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
- unsigned Depth, AssumptionCache *AC,
- const Instruction *CxtI,
+ unsigned Depth, const Instruction *CxtI,
const DominatorTree *DT) {
- return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
+ return ::ComputeNumSignBits(V, Depth, Query(DL, safeCxtI(V, CxtI), DT));
}
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
@@ -521,7 +511,7 @@
const Query &Q) {
// Use of assumptions is context-sensitive. If we don't have a context, we
// cannot use them!
- if (!Q.AC || !Q.CxtI)
+ if (!Q.CxtI)
return;
unsigned BitWidth = KnownZero.getBitWidth();
@@ -3128,7 +3118,7 @@
// See if InstructionSimplify knows any relevant tricks.
if (Instruction *I = dyn_cast<Instruction>(V))
- // TODO: Acquire a DominatorTree and AssumptionCache and use them.
+ // TODO: Acquire a DominatorTree and use it.
if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
V = Simplified;
continue;
@@ -3413,7 +3403,6 @@
OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
const Value *RHS,
const DataLayout &DL,
- AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
// Multiplying n * m significant bits yields a result of n + m significant
@@ -3427,10 +3416,8 @@
APInt LHSKnownOne(BitWidth, 0);
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
- computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
- DT);
- computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
- DT);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, CxtI, DT);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, CxtI, DT);
// Note that underestimating the number of zero bits gives a more
// conservative answer.
unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
@@ -3464,16 +3451,15 @@
OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
const Value *RHS,
const DataLayout &DL,
- AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
bool LHSKnownNonNegative, LHSKnownNegative;
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
- AC, CxtI, DT);
+ CxtI, DT);
if (LHSKnownNonNegative || LHSKnownNegative) {
bool RHSKnownNonNegative, RHSKnownNegative;
ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
- AC, CxtI, DT);
+ CxtI, DT);
if (LHSKnownNegative && RHSKnownNegative) {
// The sign bit is set in both cases: this MUST overflow.
@@ -3495,7 +3481,6 @@
const Value *RHS,
const AddOperator *Add,
const DataLayout &DL,
- AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
if (Add && Add->hasNoSignedWrap()) {
@@ -3505,9 +3490,9 @@
bool LHSKnownNonNegative, LHSKnownNegative;
bool RHSKnownNonNegative, RHSKnownNegative;
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
- AC, CxtI, DT);
+ CxtI, DT);
ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
- AC, CxtI, DT);
+ CxtI, DT);
if ((LHSKnownNonNegative && RHSKnownNegative) ||
(LHSKnownNegative && RHSKnownNonNegative)) {
@@ -3529,7 +3514,7 @@
if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
bool AddKnownNonNegative, AddKnownNegative;
ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
- /*Depth=*/0, AC, CxtI, DT);
+ /*Depth=*/0, CxtI, DT);
if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
(AddKnownNegative && LHSOrRHSKnownNegative)) {
return OverflowResult::NeverOverflows;
@@ -3603,20 +3588,18 @@
OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
const DataLayout &DL,
- AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
- Add, DL, AC, CxtI, DT);
+ Add, DL, CxtI, DT);
}
OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
const Value *RHS,
const DataLayout &DL,
- AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
- return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
+ return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, CxtI, DT);
}
bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
@@ -4147,8 +4130,7 @@
static bool isTruePredicate(CmpInst::Predicate Pred,
const Value *LHS, const Value *RHS,
const DataLayout &DL, unsigned Depth,
- AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
+ const Instruction *CxtI, const DominatorTree *DT) {
assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
return true;
@@ -4186,7 +4168,7 @@
match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
unsigned BitWidth = CA->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);
+ computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, CxtI, DT);
if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
return true;
@@ -4211,25 +4193,24 @@
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
const Value *ARHS, const Value *BLHS,
const Value *BRHS, const DataLayout &DL,
- unsigned Depth, AssumptionCache *AC,
- const Instruction *CxtI, const DominatorTree *DT) {
+ unsigned Depth, const Instruction *CxtI,
+ const DominatorTree *DT) {
switch (Pred) {
default:
return None;
case CmpInst::ICMP_SLT:
case CmpInst::ICMP_SLE:
- if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
+ if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, CxtI,
DT) &&
- isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
+ isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, CxtI, DT))
return true;
return None;
case CmpInst::ICMP_ULT:
case CmpInst::ICMP_ULE:
- if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
- DT) &&
- isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
+ if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, CxtI, DT) &&
+ isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, CxtI, DT))
return true;
return None;
}
@@ -4293,8 +4274,7 @@
Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
const DataLayout &DL, bool InvertAPred,
- unsigned Depth, AssumptionCache *AC,
- const Instruction *CxtI,
+ unsigned Depth, const Instruction *CxtI,
const DominatorTree *DT) {
// A mismatch occurs when we compare a scalar cmp to a vector cmp, for example.
if (LHS->getType() != RHS->getType())
@@ -4347,8 +4327,8 @@
}
if (APred == BPred)
- return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
- CxtI, DT);
+ return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, CxtI,
+ DT);
return None;
}