Remove the AssumptionCache
After r289755, the AssumptionCache is no longer needed: variables affected
by assumptions are now found via the new operand-bundle-based scheme. The
new scheme is more computationally efficient and requires much less code.
llvm-svn: 289756
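
To make the message concrete, here is a minimal sketch (illustrative only,
not code from this patch; the helper name is hypothetical) of the lookup the
operand-bundle scheme enables: because an llvm.assume's affected values are
operand bundle operands, and bundle operands are ordinary uses, the assumes
affecting a value V are reachable from V's own use list, with no side cache
required.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/IntrinsicInst.h"
    #include "llvm/IR/Value.h"

    using namespace llvm;

    // Hypothetical helper: collect the llvm.assume calls reachable from V's
    // use list. Under the r289755 bundle scheme, an assume that affects V
    // holds V as a bundle operand, so this walk finds it directly.
    static SmallVector<IntrinsicInst *, 4> assumesAffecting(Value *V) {
      SmallVector<IntrinsicInst *, 4> Assumes;
      for (User *U : V->users())
        if (auto *II = dyn_cast<IntrinsicInst>(U))
          if (II->getIntrinsicID() == Intrinsic::assume)
            Assumes.push_back(II);
      return Assumes;
    }

A caller would still filter the results with isValidAssumeForContext (from
ValueTracking) and the DominatorTree that the functions below continue to
thread through.
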
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 761c6d9..e79092b 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -23,7 +23,6 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
@@ -182,7 +181,7 @@
/*static*/ const Value *BasicAAResult::GetLinearExpression(
const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
- AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
+ DominatorTree *DT, bool &NSW, bool &NUW) {
assert(V->getType()->isIntegerTy() && "Not an integer value");
// Limit our recursion depth.
@@ -221,7 +220,7 @@
case Instruction::Or:
// X|C == X+C if all the bits in C are unset in X. Otherwise we can't
// analyze it.
- if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
+ if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0,
BOp, DT)) {
Scale = 1;
Offset = 0;
@@ -230,23 +229,23 @@
LLVM_FALLTHROUGH;
case Instruction::Add:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
- SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+ SExtBits, DL, Depth + 1, DT, NSW, NUW);
Offset += RHS;
break;
case Instruction::Sub:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
- SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+ SExtBits, DL, Depth + 1, DT, NSW, NUW);
Offset -= RHS;
break;
case Instruction::Mul:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
- SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+ SExtBits, DL, Depth + 1, DT, NSW, NUW);
Offset *= RHS;
Scale *= RHS;
break;
case Instruction::Shl:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
- SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+ SExtBits, DL, Depth + 1, DT, NSW, NUW);
Offset <<= RHS.getLimitedValue();
Scale <<= RHS.getLimitedValue();
// the semantics of nsw and nuw for left shifts don't match those of
@@ -273,7 +272,7 @@
unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
const Value *Result =
GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
- Depth + 1, AC, DT, NSW, NUW);
+ Depth + 1, DT, NSW, NUW);
// zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
// by just incrementing the number of bits we've extended by.
@@ -344,7 +343,7 @@
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
- DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
+ DecomposedGEP &Decomposed, const DataLayout &DL,
DominatorTree *DT) {
// Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = MaxLookupSearchDepth;
@@ -385,10 +384,9 @@
// If it's not a GEP, hand it off to SimplifyInstruction to see if it
// can come up with something. This matches what GetUnderlyingObject does.
if (const Instruction *I = dyn_cast<Instruction>(V))
- // TODO: Get a DominatorTree and AssumptionCache and use them here
- // (these are both now available in this function, but this should be
- // updated when GetUnderlyingObject is updated). TLI should be
- // provided also.
+ // TODO: Get a DominatorTree and use it here (it is now available in
+ // this function, but this should be updated when GetUnderlyingObject
+ // is updated). TLI should be provided also.
if (const Value *Simplified =
SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
V = Simplified;
@@ -450,7 +448,7 @@
APInt IndexScale(Width, 0), IndexOffset(Width, 0);
bool NSW = true, NUW = true;
Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
- SExtBits, DL, 0, AC, DT, NSW, NUW);
+ SExtBits, DL, 0, DT, NSW, NUW);
// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
@@ -1058,9 +1056,9 @@
const Value *UnderlyingV2) {
DecomposedGEP DecompGEP1, DecompGEP2;
bool GEP1MaxLookupReached =
- DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
+ DecomposeGEPExpression(GEP1, DecompGEP1, DL, DT);
bool GEP2MaxLookupReached =
- DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
+ DecomposeGEPExpression(V2, DecompGEP2, DL, DT);
int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
@@ -1222,7 +1220,7 @@
bool SignKnownZero, SignKnownOne;
ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
- 0, &AC, nullptr, DT);
+ 0, nullptr, DT);
// Zero-extension widens the variable, and so forces the sign
// bit to zero.
@@ -1257,7 +1255,7 @@
return NoAlias;
if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
- GEP1BaseOffset, &AC, DT))
+ GEP1BaseOffset, DT))
return NoAlias;
}
@@ -1659,7 +1657,7 @@
bool BasicAAResult::constantOffsetHeuristic(
const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
- uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
+ uint64_t V2Size, int64_t BaseOffset,
DominatorTree *DT) {
if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
V2Size == MemoryLocation::UnknownSize)
@@ -1682,11 +1680,11 @@
bool NSW = true, NUW = true;
unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
- V0SExtBits, DL, 0, AC, DT, NSW, NUW);
+ V0SExtBits, DL, 0, DT, NSW, NUW);
NSW = true;
NUW = true;
const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
- V1SExtBits, DL, 0, AC, DT, NSW, NUW);
+ V1SExtBits, DL, 0, DT, NSW, NUW);
if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
@@ -1720,7 +1718,6 @@
BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
return BasicAAResult(F.getParent()->getDataLayout(),
AM.getResult<TargetLibraryAnalysis>(F),
- AM.getResult<AssumptionAnalysis>(F),
&AM.getResult<DominatorTreeAnalysis>(F),
AM.getCachedResult<LoopAnalysis>(F));
}
@@ -1734,7 +1731,6 @@
INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
"Basic Alias Analysis (stateless AA impl)", true, true)
-INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
@@ -1745,13 +1741,12 @@
}
bool BasicAAWrapperPass::runOnFunction(Function &F) {
- auto &ACT = getAnalysis<AssumptionCacheTracker>();
auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
- ACT.getAssumptionCache(F), &DTWP.getDomTree(),
+ &DTWP.getDomTree(),
LIWP ? &LIWP->getLoopInfo() : nullptr));
return false;
@@ -1759,7 +1754,6 @@
void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
- AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
@@ -1767,6 +1761,5 @@
BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
return BasicAAResult(
F.getParent()->getDataLayout(),
- P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
- P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
+ P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());
}
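
For downstream code, the net effect is a shorter dependency list. Below is a
sketch of a hypothetical legacy-PM consumer after this change (the pass name
and body are illustrative, not part of this patch): since
createLegacyPMBasicAAResult now pulls in only TLI, the consumer no longer
needs to require AssumptionCacheTracker.

    #include "llvm/Analysis/BasicAliasAnalysis.h"
    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"

    using namespace llvm;

    namespace {
    // Hypothetical consumer pass: after this patch, only the TLI wrapper is
    // required to build a BasicAAResult this way; AssumptionCacheTracker has
    // disappeared from the analysis usage entirely.
    struct BasicAAConsumer : FunctionPass {
      static char ID;
      BasicAAConsumer() : FunctionPass(ID) {}

      void getAnalysisUsage(AnalysisUsage &AU) const override {
        AU.setPreservesAll();
        AU.addRequired<TargetLibraryInfoWrapperPass>();
      }

      bool runOnFunction(Function &F) override {
        BasicAAResult BAR = createLegacyPMBasicAAResult(*this, F);
        // ... query BAR.alias(...) on the MemoryLocations of interest ...
        (void)BAR;
        return false;
      }
    };
    } // end anonymous namespace

    char BasicAAConsumer::ID = 0;
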