Modify ModRefInfo values using static inline method abstractions [NFC].
Summary:
The aim is to make ModRefInfo checks and changes more intuitive
and less error-prone by using inline methods that abstract the bit operations.
Ideally ModRefInfo would become an enum class, but that change will require
a wider set of changes to FunctionModRefBehavior.
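
For reference, the new helpers used throughout this diff have roughly the
following shape (a sketch only, assuming the existing plain ModRefInfo enum
with MRI_Mod/MRI_Ref bits; the actual declarations are added to
llvm/include/llvm/Analysis/AliasAnalysis.h and are not part of the hunks
shown here):

  // True iff neither the Mod nor the Ref bit is set.
  static inline bool isNoModRef(const ModRefInfo MRI) {
    return MRI == MRI_NoModRef;
  }
  // True iff at least one of the Mod/Ref bits is set.
  static inline bool isModOrRefSet(const ModRefInfo MRI) {
    return MRI & MRI_ModRef;
  }
  static inline bool isModSet(const ModRefInfo MRI) {
    return MRI & MRI_Mod;
  }
  static inline bool isRefSet(const ModRefInfo MRI) {
    return MRI & MRI_Ref;
  }
  // Bitwise intersection, kept typed as ModRefInfo.
  static inline ModRefInfo intersectModRef(const ModRefInfo MRI1,
                                           const ModRefInfo MRI2) {
    return ModRefInfo(MRI1 & MRI2);
  }
  // Extract the mod/ref bits of a FunctionModRefBehavior.
  static inline ModRefInfo createModRefInfo(const FunctionModRefBehavior FMRB) {
    return ModRefInfo(FMRB & MRI_ModRef);
  }

With these, a raw bit test such as (MRB & MRI_Mod) reads as isModSet(MRI), and
an intersection against a mask such as (... & Access) reads as
isModOrRefSet(intersectModRef(..., Access)).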
Reviewers: sanjoy, george.burgess.iv, dberlin, hfinkel
Subscribers: nlopes, llvm-commits
Differential Revision: https://reviews.llvm.org/D40749
llvm-svn: 319821
diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index f985061..5352e32 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -130,17 +130,18 @@
SCCNodes.count(CS.getCalledFunction()))
continue;
FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS);
+ ModRefInfo MRI = createModRefInfo(MRB);
// If the call doesn't access memory, we're done.
- if (!(MRB & MRI_ModRef))
+ if (isNoModRef(MRI))
continue;
if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
// The call could access any memory. If that includes writes, give up.
- if (MRB & MRI_Mod)
+ if (isModSet(MRI))
return MAK_MayWrite;
// If it reads, note it.
- if (MRB & MRI_Ref)
+ if (isRefSet(MRI))
ReadsMemory = true;
continue;
}
@@ -162,10 +163,10 @@
if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
continue;
- if (MRB & MRI_Mod)
+ if (isModSet(MRI))
// Writes non-local memory. Give up.
return MAK_MayWrite;
- if (MRB & MRI_Ref)
+ if (isRefSet(MRI))
// Ok, it reads non-local memory.
ReadsMemory = true;
}
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index e70e759..c4e6121 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -248,7 +248,7 @@
// Ok, now we know we have not seen a store yet. See if Inst can write to
// our load location, if it can not, just ignore the instruction.
- if (!(AA->getModRefInfo(Inst, Loc) & MRI_Mod))
+ if (!isModSet(AA->getModRefInfo(Inst, Loc)))
continue;
Store = dyn_cast<StoreInst>(Inst);
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 877050e..e703014 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -594,11 +594,9 @@
}
for (; BI != EI; ++BI) {
Instruction *I = &*BI;
- if (I->mayWriteToMemory() && I != SecondI) {
- auto Res = AA->getModRefInfo(I, MemLoc);
- if (Res & MRI_Mod)
+ if (I->mayWriteToMemory() && I != SecondI)
+ if (isModSet(AA->getModRefInfo(I, MemLoc)))
return false;
- }
}
if (B != FirstBB) {
assert(B != &FirstBB->getParent()->getEntryBlock() &&
@@ -822,9 +820,7 @@
// the call is live.
DeadStackObjects.remove_if([&](Value *I) {
// See if the call site touches the value.
- ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));
-
- return A == MRI_ModRef || A == MRI_Ref;
+ return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI)));
});
// If all of the allocas were clobbered by the call then we're not going
@@ -1255,7 +1251,7 @@
if (DepWrite == &BB.front()) break;
// Can't look past this instruction if it might read 'Loc'.
- if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
+ if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
break;
InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 3e331cd..052ead8 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -788,7 +788,8 @@
++BI)
for (Instruction &I : **BI)
if (IgnoredStores.count(&I) == 0 &&
- (AA.getModRefInfo(&I, StoreLoc) & Access))
+          isModOrRefSet(
+              intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
return true;
return false;
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 86d7b5e..cd3e4ba 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -518,7 +518,7 @@
const LoadInst *LI) {
// If the store alias this position, early bail out.
MemoryLocation StoreLoc = MemoryLocation::get(SI);
- if (AA.getModRefInfo(P, StoreLoc) != MRI_NoModRef)
+ if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
return false;
// Keep track of the arguments of all instruction we plan to lift
@@ -542,20 +542,20 @@
for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
auto *C = &*I;
- bool MayAlias = AA.getModRefInfo(C, None) != MRI_NoModRef;
+ bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));
bool NeedLift = false;
if (Args.erase(C))
NeedLift = true;
else if (MayAlias) {
NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
- return AA.getModRefInfo(C, ML);
+ return isModOrRefSet(AA.getModRefInfo(C, ML));
});
if (!NeedLift)
NeedLift =
llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
- return AA.getModRefInfo(C, CS);
+ return isModOrRefSet(AA.getModRefInfo(C, CS));
});
}
@@ -565,18 +565,18 @@
if (MayAlias) {
// Since LI is implicitly moved downwards past the lifted instructions,
// none of them may modify its source.
- if (AA.getModRefInfo(C, LoadLoc) & MRI_Mod)
+ if (isModSet(AA.getModRefInfo(C, LoadLoc)))
return false;
else if (auto CS = ImmutableCallSite(C)) {
// If we can't lift this before P, it's game over.
- if (AA.getModRefInfo(P, CS) != MRI_NoModRef)
+ if (isModOrRefSet(AA.getModRefInfo(P, CS)))
return false;
CallSites.push_back(CS);
} else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
// If we can't lift this before P, it's game over.
auto ML = MemoryLocation::get(C);
- if (AA.getModRefInfo(P, ML) != MRI_NoModRef)
+ if (isModOrRefSet(AA.getModRefInfo(P, ML)))
return false;
MemLocs.push_back(ML);
@@ -631,7 +631,7 @@
// of at the store position.
Instruction *P = SI;
for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
- if (AA.getModRefInfo(&I, LoadLoc) & MRI_Mod) {
+ if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
P = &I;
break;
}
@@ -702,7 +702,7 @@
MemoryLocation StoreLoc = MemoryLocation::get(SI);
for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
I != E; --I) {
- if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
+ if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
C = nullptr;
break;
}
@@ -934,9 +934,9 @@
AliasAnalysis &AA = LookupAliasAnalysis();
ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
// If necessary, perform additional analysis.
- if (MR != MRI_NoModRef)
+ if (isModOrRefSet(MR))
MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
- if (MR != MRI_NoModRef)
+ if (isModOrRefSet(MR))
return false;
// We can't create address space casts here because we don't know if they're
diff --git a/llvm/lib/Transforms/Scalar/Sink.cpp b/llvm/lib/Transforms/Scalar/Sink.cpp
index 5210f16..cfb8a06 100644
--- a/llvm/lib/Transforms/Scalar/Sink.cpp
+++ b/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -68,7 +68,7 @@
if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
MemoryLocation Loc = MemoryLocation::get(L);
for (Instruction *S : Stores)
- if (AA.getModRefInfo(S, Loc) & MRI_Mod)
+ if (isModSet(AA.getModRefInfo(S, Loc)))
return false;
}
@@ -83,7 +83,7 @@
return false;
for (Instruction *S : Stores)
- if (AA.getModRefInfo(S, CS) & MRI_Mod)
+ if (isModSet(AA.getModRefInfo(S, CS)))
return false;
}
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index f5aa47f..a8782e0 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -332,7 +332,7 @@
// Writes to memory only matter if they may alias the pointer
// being loaded from.
const DataLayout &DL = L->getModule()->getDataLayout();
- if ((AA->getModRefInfo(CI, MemoryLocation::get(L)) & MRI_Mod) ||
+ if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
!isSafeToLoadUnconditionally(L->getPointerOperand(),
L->getAlignment(), DL, L))
return false;