//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
          "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static const int BlockScanLimit = 100;

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
    : FunctionPass(ID), PredCache() {
  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}

/// Clean up memory in between runs.
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache->clear();
}

/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &) {
  AA = &getAnalysis<AliasAnalysis>();
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : nullptr;
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  if (!PredCache)
    PredCache.reset(new PredIteratorCache());
  return false;
}

/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
  InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!"); (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// GetLocation - If the given instruction references a specific memory
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
static
AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
                                        AliasAnalysis::Location &Loc,
                                        AliasAnalysis *AA) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::Ref;
    }
    if (LI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::Mod;
    }
    if (SI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = AA->getLocation(V);
    return AliasAnalysis::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
    // Calls to free() deallocate the entire structure.
    Loc = AliasAnalysis::Location(CI->getArgOperand(0));
    return AliasAnalysis::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    AAMDNodes AAInfo;

    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      II->getAAMetadata(AAInfo);
      Loc = AliasAnalysis::Location(II->getArgOperand(1),
                                    cast<ConstantInt>(II->getArgOperand(0))
                                        ->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    case Intrinsic::invariant_end:
      II->getAAMetadata(AAInfo);
      Loc = AliasAnalysis::Location(II->getArgOperand(2),
                                    cast<ConstantInt>(II->getArgOperand(1))
                                        ->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return AliasAnalysis::ModRef;
  if (Inst->mayReadFromMemory())
    return AliasAnalysis::Ref;
  return AliasAnalysis::NoModRef;
}
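
// Informal illustration of the contract above (the IR names are made up for
// the example): for a simple unordered load such as "%v = load i32* %p",
// GetLocation fills Loc with %p, the size of an i32, and the load's AA
// metadata, and returns Ref; a simple store likewise fills Loc and returns
// Mod.  Anything it cannot describe precisely falls through to the coarse
// mayWriteToMemory()/mayReadFromMemory() checks at the end.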

/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed.
    AliasAnalysis::Location Loc;
    AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (CallSite InstCS = cast<Value>(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst)) continue;
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case AliasAnalysis::NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        continue;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (MR != AliasAnalysis::NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found.  If this is the entry block of the function, the
  // dependence is not function-local; otherwise it is non-local to this block.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
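
// Informal summary of the scan above: any scanned instruction whose memory
// effects may interfere with the call is reported as a Clobber; an identical
// read-only call is reported as a Def (so callers can treat CS as redundant);
// and falling off the top of the block yields NonLocal (or NonFuncLocal in
// the entry block).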

/// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that
/// would fully overlap MemLoc if done as a wider legal integer load.
///
/// MemLocBase, MemLocOffset are lazily computed here the first time the
/// base/offs of memloc is needed.
static bool
isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
                                       const Value *&MemLocBase,
                                       int64_t &MemLocOffs,
                                       const LoadInst *LI,
                                       const DataLayout *DL) {
  // If we have no target data, we can't do this.
  if (!DL) return false;

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (!MemLocBase)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);

  unsigned Size = MemoryDependenceAnalysis::
    getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
                                    LI, *DL);
  return Size != 0;
}

/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
/// looks at a memory location for a load (specified by MemLocBase, Offs,
/// and Size) and compares it against a load.  If the specified load could
/// be safely widened to a larger integer load that is 1) still efficient,
/// 2) safe for the target, and 3) would provide the specified memory
/// location value, then this function returns the size in bytes of the
/// load width to use.  If not, this returns zero.
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
                                unsigned MemLocSize, const LoadInst *LI,
                                const DataLayout &DL) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->getAttributes().
      hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeThread))
    return 0;

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &DL);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase) return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs) return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to
  // load any legal integer up to this size without a problem.  For example,
  // if we're looking at an i8 load on x86-32 that is known 1024 byte aligned,
  // we can widen it up to an i32 load.  If it is known 2-byte aligned, we can
  // widen it to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs+MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs+LoadAlign < MemLocEnd) return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (1) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !DL.fitsInLegalInteger(NewLoadByteSize*8))
      return 0;

    if (LIOffs+NewLoadByteSize > MemLocEnd &&
        LI->getParent()->getParent()->getAttributes().
          hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeAddress))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings.  So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs+NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}
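
// Worked example for the routine above (assuming i16 and i32 are legal
// integer types for the target): given an i8 load of a pointer P known to be
// 4-byte aligned, and a query for the single byte at P+3, NewLoadByteSize
// starts at 2, does not yet cover offset 3, doubles to 4, and 4 is returned;
// i.e. the i8 load may be widened to an i32 load covering both the loaded
// byte and the queried byte.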

/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases
/// with read-only operations.  If isLoad is false, this routine ignores
/// may-aliases with reads from read-only locations.  If possible, pass the
/// query instruction as well; this function may take advantage of the
/// metadata annotated to the query instruction to refine the result.
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
                         BasicBlock::iterator ScanIt, BasicBlock *BB,
                         Instruction *QueryInst) {

  const Value *MemLocBase = nullptr;
  int64_t MemLocOffset = 0;
  unsigned Limit = BlockScanLimit;
  bool isInvariantLoad = false;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it.  We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42.  A key property of this program, however, is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimisation of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.
  bool HasSeenAcquire = false;

  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
      isInvariantLoad = true;
  }

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II)) continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
                            MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load
    // that it does not alias with when this atomic load indicates that
    // another thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // An Acquire (or higher) load sets the HasSeenAcquire flag, so that any
      // release store will know to return getClobber.
      // FIXME: This is overly conservative.
      if (!LI->isUnordered()) {
        if (!QueryInst)
          return MemDepResult::getClobber(LI);
        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
          if (!QueryLI->isSimple())
            return MemDepResult::getClobber(LI);
        } else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
          if (!QuerySI->isSimple())
            return MemDepResult::getClobber(LI);
        } else if (QueryInst->mayReadOrWriteMemory()) {
          return MemDepResult::getClobber(LI);
        }

        if (isAtLeastAcquire(LI->getOrdering()))
          HasSeenAcquire = true;
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can, for example,
      // be reordered with volatile accesses.
      if (LI->isVolatile())
        return MemDepResult::getClobber(LI);

      AliasAnalysis::Location LoadLoc = AA->getLocation(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasAnalysis::NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1).  If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
            if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI, DL))
              return MemDepResult::getClobber(Inst);

          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == AliasAnalysis::MustAlias)
          return MemDepResult::getDef(Inst);

#if 0   // FIXME: Temporarily disabled.  GVN is cleverly rewriting loads
        // in terms of clobbering loads, but since it does this by looking
        // at the clobbering load directly, it doesn't know about any
        // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasAnalysis::PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Random may-alias loads don't depend on each other without a
        // dependence.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == AliasAnalysis::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA->pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // A Release (or higher) store further requires that no acquire load
      // has been seen.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered()) {
        if (!QueryInst)
          return MemDepResult::getClobber(SI);
        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
          if (!QueryLI->isSimple())
            return MemDepResult::getClobber(SI);
        } else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
          if (!QuerySI->isSimple())
            return MemDepResult::getClobber(SI);
        } else if (QueryInst->mayReadOrWriteMemory()) {
          return MemDepResult::getClobber(SI);
        }

        if (HasSeenAcquire && isAtLeastRelease(SI->getOrdering()))
          return MemDepResult::getClobber(SI);
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can, for example,
      // be reordered with volatile accesses.
      if (SI->isVolatile())
        return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      AliasAnalysis::Location StoreLoc = AA->getLocation(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);

      if (R == AliasAnalysis::NoAlias)
        continue;
      if (R == AliasAnalysis::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call, not
    // a subsequent bitcast of the malloc call result.  There can be stores to
    // the malloced memory between the malloc call and its bitcast uses, and we
    // need to continue scanning until the malloc call.
    const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);

      if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
      // Be conservative if the accessed pointer may alias the allocation.
      if (AA->alias(Inst, AccessPtr) != AliasAnalysis::NoAlias)
        return MemDepResult::getClobber(Inst);
      // If the allocation is not aliased and does not read memory (like
      // strdup), it is safe to ignore.
      if (isa<AllocaInst>(Inst) ||
          isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
        continue;
    }

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (MR == AliasAnalysis::ModRef)
      MR = AA->callCapturesBefore(Inst, MemLoc, DT);
    switch (MR) {
    case AliasAnalysis::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case AliasAnalysis::Mod:
      return MemDepResult::getClobber(Inst);
    case AliasAnalysis::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, the
  // dependence is not function-local; otherwise it is non-local to this block.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
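
// Informal summary of the possible results above: getDef for a must-aliased
// load/store (or an allocation of the queried memory), getClobber for any
// access that may interfere (including the conservative atomic and volatile
// cases), getUnknown when the scan limit is exhausted, and getNonLocal /
// getNonFuncLocal when the walk falls off the top of a non-entry / entry
// block.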

/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, the
    // dependence is not function-local; otherwise it is non-local to this
    // block.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    AliasAnalysis::Location MemLoc;
    AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & AliasAnalysis::Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
                                            QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
                                             QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
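
// Informal usage sketch (clients such as GVN and DSE follow roughly this
// pattern; MD is assumed to be a pointer to this pass):
//   MemDepResult Dep = MD->getDependency(QueryInst);
//   if (Dep.isDef() || Dep.isClobber())
//     ... Dep.getInst() is the local defining/clobbering instruction ...
//   else if (Dep.isNonLocal())
//     ... the dependence lies outside this block; use the non-local API ...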

#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1) Count = Cache.size();
  if (Count == 0) return;

  for (unsigned i = 1; i != unsigned(Count); ++i)
    assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif

/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc.
  /// In the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
         I != E; ++I)
      if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB))
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos,
                                      DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is not the entry block of the function,
      // the dependence is non-local; in the entry block it is
      // non-function-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }

  return Cache;
}

/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
                             BasicBlock *FromBB,
                             SmallVectorImpl<NonLocalDepResult> &Result) {
  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
  DenseMap<BasicBlock*, Value*> Visited;
  if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
                                   Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB,
                                     MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}
877
Chris Lattnerf903fe12008-12-09 07:47:11 +0000878/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
879/// Pointer/PointeeSize using either cached information in Cache or by doing a
880/// lookup (which may use dirty cache info if available). If we do a lookup,
881/// add the result to the cache.
882MemDepResult MemoryDependenceAnalysis::
Dan Gohman23483932010-09-22 21:41:02 +0000883GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
Chris Lattnerf903fe12008-12-09 07:47:11 +0000884 bool isLoad, BasicBlock *BB,
885 NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000886
Chris Lattnerf903fe12008-12-09 07:47:11 +0000887 // Do a binary search to see if we already have an entry for this block in
888 // the cache set. If so, find it.
889 NonLocalDepInfo::iterator Entry =
890 std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
Chris Lattnereea0f582009-12-09 07:31:04 +0000891 NonLocalDepEntry(BB));
Chris Lattner0c315472009-12-09 07:08:01 +0000892 if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
Chris Lattnerf903fe12008-12-09 07:47:11 +0000893 --Entry;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000894
Craig Topper9f008862014-04-15 04:59:12 +0000895 NonLocalDepEntry *ExistingResult = nullptr;
Chris Lattner0c315472009-12-09 07:08:01 +0000896 if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
897 ExistingResult = &*Entry;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000898
Chris Lattnerf903fe12008-12-09 07:47:11 +0000899 // If we have a cached entry, and it is non-dirty, use it as the value for
900 // this dependency.
Chris Lattner0c315472009-12-09 07:08:01 +0000901 if (ExistingResult && !ExistingResult->getResult().isDirty()) {
Chris Lattnerf903fe12008-12-09 07:47:11 +0000902 ++NumCacheNonLocalPtr;
Chris Lattner0c315472009-12-09 07:08:01 +0000903 return ExistingResult->getResult();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000904 }
905
Chris Lattnerf903fe12008-12-09 07:47:11 +0000906 // Otherwise, we have to scan for the value. If we have a dirty cache
 907 // entry, start scanning from its position; otherwise, we scan from the end
908 // of the block.
909 BasicBlock::iterator ScanPos = BB->end();
Chris Lattner0c315472009-12-09 07:08:01 +0000910 if (ExistingResult && ExistingResult->getResult().getInst()) {
911 assert(ExistingResult->getResult().getInst()->getParent() == BB &&
Chris Lattnerf903fe12008-12-09 07:47:11 +0000912 "Instruction invalidated?");
913 ++NumCacheDirtyNonLocalPtr;
Chris Lattner0c315472009-12-09 07:08:01 +0000914 ScanPos = ExistingResult->getResult().getInst();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000915
Chris Lattnerf903fe12008-12-09 07:47:11 +0000916 // Eliminating the dirty entry from 'Cache', so update the reverse info.
Dan Gohman23483932010-09-22 21:41:02 +0000917 ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
Chris Lattner8eda11b2009-03-29 00:24:04 +0000918 RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
Chris Lattnerf903fe12008-12-09 07:47:11 +0000919 } else {
920 ++NumUncacheNonLocalPtr;
921 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000922
Chris Lattnerf903fe12008-12-09 07:47:11 +0000923 // Scan the block for the dependency.
Dan Gohman23483932010-09-22 21:41:02 +0000924 MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000925
Chris Lattnerf903fe12008-12-09 07:47:11 +0000926 // If we had a dirty entry for the block, update it. Otherwise, just add
927 // a new entry.
928 if (ExistingResult)
Chris Lattner9b7d99e2009-12-22 04:25:02 +0000929 ExistingResult->setResult(Dep);
Chris Lattnerf903fe12008-12-09 07:47:11 +0000930 else
Chris Lattner9b7d99e2009-12-22 04:25:02 +0000931 Cache->push_back(NonLocalDepEntry(BB, Dep));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000932
Chris Lattnerf903fe12008-12-09 07:47:11 +0000933 // If the block has a dependency (i.e. it isn't completely transparent to
934 // the value), remember the reverse association because we just added it
935 // to Cache!
Eli Friedmanc1702c82011-10-13 22:14:57 +0000936 if (!Dep.isDef() && !Dep.isClobber())
Chris Lattnerf903fe12008-12-09 07:47:11 +0000937 return Dep;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000938
Chris Lattnerf903fe12008-12-09 07:47:11 +0000939 // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
940 // update MemDep when we remove instructions.
941 Instruction *Inst = Dep.getInst();
942 assert(Inst && "Didn't depend on anything?");
Dan Gohman23483932010-09-22 21:41:02 +0000943 ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
Chris Lattner8eda11b2009-03-29 00:24:04 +0000944 ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
Chris Lattnerf903fe12008-12-09 07:47:11 +0000945 return Dep;
946}
947
Robin Morisset039781e2014-08-29 21:53:01 +0000948/// SortNonLocalDepInfoCache - Sort the NonLocalDepInfo cache, given a certain
Chris Lattner370aada2009-07-13 17:20:05 +0000949/// number of elements in the array that are already properly ordered. This is
950/// optimized for the case when only a few entries are added.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000951static void
Chris Lattner370aada2009-07-13 17:20:05 +0000952SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
953 unsigned NumSortedEntries) {
954 switch (Cache.size() - NumSortedEntries) {
955 case 0:
956 // done, no new entries.
957 break;
958 case 2: {
959 // Two new entries, insert the last one into place.
Chris Lattner0c315472009-12-09 07:08:01 +0000960 NonLocalDepEntry Val = Cache.back();
Chris Lattner370aada2009-07-13 17:20:05 +0000961 Cache.pop_back();
962 MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
963 std::upper_bound(Cache.begin(), Cache.end()-1, Val);
964 Cache.insert(Entry, Val);
965 // FALL THROUGH.
966 }
967 case 1:
968 // One new entry, Just insert the new value at the appropriate position.
969 if (Cache.size() != 1) {
Chris Lattner0c315472009-12-09 07:08:01 +0000970 NonLocalDepEntry Val = Cache.back();
Chris Lattner370aada2009-07-13 17:20:05 +0000971 Cache.pop_back();
972 MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
973 std::upper_bound(Cache.begin(), Cache.end(), Val);
974 Cache.insert(Entry, Val);
975 }
976 break;
977 default:
978 // Added many values, do a full scale sort.
979 std::sort(Cache.begin(), Cache.end());
980 break;
981 }
982}
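
// A minimal standalone sketch of the same idea, assuming only <vector> and
// <algorithm>: entries [0, NumSorted) are already ordered, and each newly
// appended entry is rotated into place, which is cheaper than a full sort
// when only a few entries were added. Purely illustrative; the names are
// hypothetical and the block is kept under '#if 0' so it is never compiled.
#if 0
static void insertTailSorted(std::vector<int> &V, unsigned NumSorted) {
  for (unsigned i = NumSorted, e = V.size(); i != e; ++i) {
    std::vector<int>::iterator Pos =
        std::upper_bound(V.begin(), V.begin() + i, V[i]);
    // Move V[i] into position Pos, shifting [Pos, i) right by one element.
    std::rotate(Pos, V.begin() + i, V.begin() + i + 1);
  }
}
#endif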
983
Chris Lattnerff9f3db2008-12-15 03:35:32 +0000984/// getNonLocalPointerDepFromBB - Perform a dependency query based on
985/// pointer/pointeesize starting at the end of StartBB. Add any clobber/def
986/// results to the results vector and keep track of which blocks are visited in
987/// 'Visited'.
988///
989/// This has special behavior for the first block queries (when SkipFirstBlock
990/// is true). In this special case, it ignores the contents of the specified
991/// block and starts returning dependence info for its predecessors.
992///
993/// This function returns false on success, or true to indicate that it could
994/// not compute dependence information for some reason. This should be treated
995/// as a clobber dependence on the first instruction in the predecessor block.
996bool MemoryDependenceAnalysis::
Dan Gohman23483932010-09-22 21:41:02 +0000997getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
998 const AliasAnalysis::Location &Loc,
Chris Lattnerf903fe12008-12-09 07:47:11 +0000999 bool isLoad, BasicBlock *StartBB,
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001000 SmallVectorImpl<NonLocalDepResult> &Result,
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001001 DenseMap<BasicBlock*, Value*> &Visited,
1002 bool SkipFirstBlock) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001003 // Look up the cached info for Pointer.
Chris Lattner972e6d82009-12-09 01:59:31 +00001004 ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
Dan Gohman23483932010-09-22 21:41:02 +00001005
Dan Gohman0a6021a2010-11-10 20:37:15 +00001006 // Set up a temporary NLPI value. If the map doesn't yet have an entry for
1007 // CacheKey, this value will be inserted as the associated value. Otherwise,
1008 // it'll be ignored, and we'll have to check to see if the cached size and
Hal Finkelcc39b672014-07-24 12:16:19 +00001009 // AA tags are consistent with the current query.
Dan Gohman0a6021a2010-11-10 20:37:15 +00001010 NonLocalPointerInfo InitialNLPI;
1011 InitialNLPI.Size = Loc.Size;
Hal Finkelcc39b672014-07-24 12:16:19 +00001012 InitialNLPI.AATags = Loc.AATags;
Dan Gohman0a6021a2010-11-10 20:37:15 +00001013
1014 // Get the NLPI for CacheKey, inserting one into the map if it doesn't
1015 // already have one.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001016 std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
Dan Gohman0a6021a2010-11-10 20:37:15 +00001017 NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
1018 NonLocalPointerInfo *CacheInfo = &Pair.first->second;
1019
Dan Gohman2e8ca442010-11-10 21:45:11 +00001020 // If we already have a cache entry for this CacheKey, we may need to do some
1021 // work to reconcile the cache entry and the current query.
Dan Gohman0a6021a2010-11-10 20:37:15 +00001022 if (!Pair.second) {
Dan Gohman2e8ca442010-11-10 21:45:11 +00001023 if (CacheInfo->Size < Loc.Size) {
1024 // The query's Size is greater than the cached one. Throw out the
Benjamin Kramerbde91762012-06-02 10:20:22 +00001025 // cached data and proceed with the query at the greater size.
Dan Gohman2e8ca442010-11-10 21:45:11 +00001026 CacheInfo->Pair = BBSkipFirstBlockPair();
1027 CacheInfo->Size = Loc.Size;
Dan Gohman67919362010-11-10 22:35:02 +00001028 for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
1029 DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
1030 if (Instruction *Inst = DI->getResult().getInst())
1031 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
Dan Gohman2e8ca442010-11-10 21:45:11 +00001032 CacheInfo->NonLocalDeps.clear();
1033 } else if (CacheInfo->Size > Loc.Size) {
1034 // This query's Size is less than the cached one. Conservatively restart
1035 // the query using the greater size.
Dan Gohman0a6021a2010-11-10 20:37:15 +00001036 return getNonLocalPointerDepFromBB(Pointer,
1037 Loc.getWithNewSize(CacheInfo->Size),
1038 isLoad, StartBB, Result, Visited,
1039 SkipFirstBlock);
1040 }
1041
Hal Finkelcc39b672014-07-24 12:16:19 +00001042 // If the query's AATags are inconsistent with the cached one,
Dan Gohman2e8ca442010-11-10 21:45:11 +00001043 // conservatively throw out the cached data and restart the query with
1044 // no tag if needed.
Hal Finkelcc39b672014-07-24 12:16:19 +00001045 if (CacheInfo->AATags != Loc.AATags) {
1046 if (CacheInfo->AATags) {
Dan Gohman2e8ca442010-11-10 21:45:11 +00001047 CacheInfo->Pair = BBSkipFirstBlockPair();
Hal Finkelcc39b672014-07-24 12:16:19 +00001048 CacheInfo->AATags = AAMDNodes();
Dan Gohman67919362010-11-10 22:35:02 +00001049 for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
1050 DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
1051 if (Instruction *Inst = DI->getResult().getInst())
1052 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
Dan Gohman2e8ca442010-11-10 21:45:11 +00001053 CacheInfo->NonLocalDeps.clear();
1054 }
Hal Finkelcc39b672014-07-24 12:16:19 +00001055 if (Loc.AATags)
1056 return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutAATags(),
Dan Gohman2e8ca442010-11-10 21:45:11 +00001057 isLoad, StartBB, Result, Visited,
1058 SkipFirstBlock);
Dan Gohman0a6021a2010-11-10 20:37:15 +00001059 }
Dan Gohman23483932010-09-22 21:41:02 +00001060 }
1061
1062 NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
Chris Lattner5ed409e2008-12-08 07:31:50 +00001063
1064 // If we have valid cached information for exactly the block we are
1065 // investigating, just return it with no recomputation.
Dan Gohman23483932010-09-22 21:41:02 +00001066 if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
Chris Lattner8b4be372008-12-16 07:10:09 +00001067 // If we have a fully cached result for this query, we can just return the
1068 // cached results and populate the visited set. However, we have to verify
1069 // that we don't already have conflicting results for these blocks. Check
1070 // to ensure that if a block in the results set is in the visited set that
1071 // it was for the same pointer query.
1072 if (!Visited.empty()) {
1073 for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
1074 I != E; ++I) {
Chris Lattner0c315472009-12-09 07:08:01 +00001075 DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
Chris Lattner972e6d82009-12-09 01:59:31 +00001076 if (VI == Visited.end() || VI->second == Pointer.getAddr())
1077 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001078
Chris Lattner8b4be372008-12-16 07:10:09 +00001079 // We have a pointer mismatch in a block. Just return clobber, saying
1080 // that something was clobbered in this result. We could also do a
1081 // non-fully cached query, but there is little point in doing this.
1082 return true;
1083 }
1084 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001085
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001086 Value *Addr = Pointer.getAddr();
Chris Lattner5ed409e2008-12-08 07:31:50 +00001087 for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
Chris Lattner8b4be372008-12-16 07:10:09 +00001088 I != E; ++I) {
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001089 Visited.insert(std::make_pair(I->getBB(), Addr));
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001090 if (I->getResult().isNonLocal()) {
1091 continue;
1092 }
1093
1094 if (!DT) {
1095 Result.push_back(NonLocalDepResult(I->getBB(),
1096 MemDepResult::getUnknown(),
1097 Addr));
1098 } else if (DT->isReachableFromEntry(I->getBB())) {
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001099 Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001100 }
Chris Lattner8b4be372008-12-16 07:10:09 +00001101 }
Chris Lattner5ed409e2008-12-08 07:31:50 +00001102 ++NumCacheCompleteNonLocalPtr;
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001103 return false;
Chris Lattner5ed409e2008-12-08 07:31:50 +00001104 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001105
Chris Lattner5ed409e2008-12-08 07:31:50 +00001106 // Otherwise, this is either a new block, a block with an invalid cache
 1107 // pointer, or one that we're about to invalidate by putting more info into
 1108 // it than its valid cache info can hold. If the cache is empty, the result
 1109 // will be valid cache info; otherwise it isn't.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001110 if (Cache->empty())
Dan Gohman23483932010-09-22 21:41:02 +00001111 CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
Dan Gohmanc87c8432010-11-11 00:42:22 +00001112 else
Dan Gohman23483932010-09-22 21:41:02 +00001113 CacheInfo->Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001114
Chris Lattner5ed409e2008-12-08 07:31:50 +00001115 SmallVector<BasicBlock*, 32> Worklist;
1116 Worklist.push_back(StartBB);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001117
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001118 // PredList used inside loop.
1119 SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;
1120
Chris Lattnera28355d2008-12-07 08:50:20 +00001121 // Keep track of the entries that we know are sorted. Previously cached
 1122 // entries will all be sorted. The entries we add are only sorted on demand (we
1123 // don't insert every element into its sorted position). We know that we
1124 // won't get any reuse from currently inserted values, because we don't
1125 // revisit blocks after we insert info for them.
1126 unsigned NumSortedEntries = Cache->size();
Chris Lattnerf09619d2009-01-22 07:04:01 +00001127 DEBUG(AssertSorted(*Cache));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001128
Chris Lattner2faa2c72008-12-07 02:15:47 +00001129 while (!Worklist.empty()) {
Chris Lattner7564a3b2008-12-07 02:56:57 +00001130 BasicBlock *BB = Worklist.pop_back_val();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001131
Chris Lattner75510d82008-12-09 07:52:59 +00001132 // Skip the first block if we have it.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001133 if (!SkipFirstBlock) {
Chris Lattner75510d82008-12-09 07:52:59 +00001134 // Analyze the dependency of *Pointer in FromBB. See if we already have
1135 // been here.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001136 assert(Visited.count(BB) && "Should check 'visited' before adding to WL");
Chris Lattnera28355d2008-12-07 08:50:20 +00001137
Chris Lattner75510d82008-12-09 07:52:59 +00001138 // Get the dependency info for Pointer in BB. If we have cached
1139 // information, we will use it, otherwise we compute it.
Chris Lattnerf09619d2009-01-22 07:04:01 +00001140 DEBUG(AssertSorted(*Cache, NumSortedEntries));
Dan Gohman23483932010-09-22 21:41:02 +00001141 MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
Chris Lattner972e6d82009-12-09 01:59:31 +00001142 NumSortedEntries);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001143
Chris Lattner75510d82008-12-09 07:52:59 +00001144 // If we got a Def or Clobber, add this to the list of results.
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001145 if (!Dep.isNonLocal()) {
1146 if (!DT) {
1147 Result.push_back(NonLocalDepResult(BB,
1148 MemDepResult::getUnknown(),
1149 Pointer.getAddr()));
1150 continue;
1151 } else if (DT->isReachableFromEntry(BB)) {
1152 Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
1153 continue;
1154 }
Chris Lattner75510d82008-12-09 07:52:59 +00001155 }
Chris Lattner2faa2c72008-12-07 02:15:47 +00001156 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001157
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001158 // If 'Pointer' is an instruction defined in this block, then we need to do
1159 // phi translation to change it into a value live in the predecessor block.
Chris Lattner972e6d82009-12-09 01:59:31 +00001160 // If not, we just add the predecessors to the worklist and scan them with
1161 // the same Pointer.
1162 if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001163 SkipFirstBlock = false;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001164 SmallVector<BasicBlock*, 16> NewBlocks;
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001165 for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
1166 // Verify that we haven't looked at this block yet.
1167 std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
Chris Lattner972e6d82009-12-09 01:59:31 +00001168 InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001169 if (InsertRes.second) {
1170 // First time we've looked at *PI.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001171 NewBlocks.push_back(*PI);
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001172 continue;
1173 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001174
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001175 // If we have seen this block before, but it was with a different
1176 // pointer then we have a phi translation failure and we have to treat
1177 // this as a clobber.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001178 if (InsertRes.first->second != Pointer.getAddr()) {
1179 // Make sure to clean up the Visited map before continuing on to
1180 // PredTranslationFailure.
1181 for (unsigned i = 0; i < NewBlocks.size(); i++)
1182 Visited.erase(NewBlocks[i]);
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001183 goto PredTranslationFailure;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001184 }
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001185 }
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001186 Worklist.append(NewBlocks.begin(), NewBlocks.end());
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001187 continue;
1188 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001189
Chris Lattner972e6d82009-12-09 01:59:31 +00001190 // We do need to do phi translation; if we know ahead of time we can't phi
1191 // translate this value, don't even try.
1192 if (!Pointer.IsPotentiallyPHITranslatable())
1193 goto PredTranslationFailure;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001194
Chris Lattner2f0c1c42009-07-13 17:14:23 +00001195 // We may have added values to the cache list before this PHI translation.
1196 // If so, we haven't done anything to ensure that the cache remains sorted.
1197 // Sort it now (if needed) so that recursive invocations of
1198 // getNonLocalPointerDepFromBB and other routines that could reuse the cache
1199 // value will only see properly sorted cache arrays.
1200 if (Cache && NumSortedEntries != Cache->size()) {
Chris Lattner370aada2009-07-13 17:20:05 +00001201 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
Chris Lattner2f0c1c42009-07-13 17:14:23 +00001202 NumSortedEntries = Cache->size();
1203 }
Craig Topper9f008862014-04-15 04:59:12 +00001204 Cache = nullptr;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001205
1206 PredList.clear();
Chris Lattnerac323292009-11-27 08:37:22 +00001207 for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
1208 BasicBlock *Pred = *PI;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001209 PredList.push_back(std::make_pair(Pred, Pointer));
1210
Chris Lattner972e6d82009-12-09 01:59:31 +00001211 // Get the PHI translated pointer in this predecessor. This can fail if
 1212 // not translatable, in which case getAddr() returns null.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001213 PHITransAddr &PredPointer = PredList.back().second;
Craig Topper9f008862014-04-15 04:59:12 +00001214 PredPointer.PHITranslateValue(BB, Pred, nullptr);
Chris Lattner972e6d82009-12-09 01:59:31 +00001215
1216 Value *PredPtrVal = PredPointer.getAddr();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001217
Chris Lattnerac323292009-11-27 08:37:22 +00001218 // Check to see if we have already visited this pred block with another
1219 // pointer. If so, we can't do this lookup. This failure can occur
1220 // with PHI translation when a critical edge exists and the PHI node in
1221 // the successor translates to a pointer value different than the
1222 // pointer the block was first analyzed with.
1223 std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
Chris Lattner972e6d82009-12-09 01:59:31 +00001224 InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001225
Chris Lattnerac323292009-11-27 08:37:22 +00001226 if (!InsertRes.second) {
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001227 // We found the pred; take it off the list of preds to visit.
1228 PredList.pop_back();
1229
Chris Lattnerac323292009-11-27 08:37:22 +00001230 // If the predecessor was visited with PredPtr, then we already did
1231 // the analysis and can ignore it.
Chris Lattner972e6d82009-12-09 01:59:31 +00001232 if (InsertRes.first->second == PredPtrVal)
Chris Lattnerac323292009-11-27 08:37:22 +00001233 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001234
Chris Lattnerac323292009-11-27 08:37:22 +00001235 // Otherwise, the block was previously analyzed with a different
1236 // pointer. We can't represent the result of this case, so we just
1237 // treat this as a phi translation failure.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001238
1239 // Make sure to clean up the Visited map before continuing on to
1240 // PredTranslationFailure.
Matt Arsenault2080ecd2013-03-29 18:48:42 +00001241 for (unsigned i = 0, n = PredList.size(); i < n; ++i)
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001242 Visited.erase(PredList[i].first);
1243
Chris Lattnerac323292009-11-27 08:37:22 +00001244 goto PredTranslationFailure;
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001245 }
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001246 }
1247
1248 // Actually process results here; this need to be a separate loop to avoid
1249 // calling getNonLocalPointerDepFromBB for blocks we don't want to return
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001250 // any results for. (getNonLocalPointerDepFromBB will modify our
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001251 // data structures in ways the code after the PredTranslationFailure label
1252 // doesn't expect.)
Matt Arsenault2080ecd2013-03-29 18:48:42 +00001253 for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001254 BasicBlock *Pred = PredList[i].first;
1255 PHITransAddr &PredPointer = PredList[i].second;
1256 Value *PredPtrVal = PredPointer.getAddr();
1257
1258 bool CanTranslate = true;
Chris Lattner2be52e72009-11-27 22:05:15 +00001259 // If PHI translation was unable to find an available pointer in this
1260 // predecessor, then we have to assume that the pointer is clobbered in
1261 // that predecessor. We can still do PRE of the load, which would insert
1262 // a computation of the pointer in this predecessor.
Craig Topper9f008862014-04-15 04:59:12 +00001263 if (!PredPtrVal)
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001264 CanTranslate = false;
1265
1266 // FIXME: it is entirely possible that PHI translating will end up with
1267 // the same value. Consider PHI translating something like:
1268 // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
1269 // to recurse here, pedantically speaking.
1270
1271 // If getNonLocalPointerDepFromBB fails here, that means the cached
1272 // result conflicted with the Visited list; we have to conservatively
Eli Friedman7d58bc72011-06-15 00:47:34 +00001273 // assume it is unknown, but this also does not block PRE of the load.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001274 if (!CanTranslate ||
1275 getNonLocalPointerDepFromBB(PredPointer,
1276 Loc.getWithNewPtr(PredPtrVal),
1277 isLoad, Pred,
1278 Result, Visited)) {
Chris Lattner9c2053b2009-12-01 07:33:32 +00001279 // Add the entry to the Result list.
Eli Friedman7d58bc72011-06-15 00:47:34 +00001280 NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
Chris Lattner9c2053b2009-12-01 07:33:32 +00001281 Result.push_back(Entry);
1282
Chris Lattner25bf6f82009-12-19 21:29:22 +00001283 // Since we had a phi translation failure, the cache for CacheKey won't
1284 // include all of the entries that we need to immediately satisfy future
1285 // queries. Mark this in NonLocalPointerDeps by setting the
 1286 // BBSkipFirstBlockPair pointer to null. This means later reuse of the
 1287 // cached value does more work, but does not miss the phi translation failure.
Dan Gohman23483932010-09-22 21:41:02 +00001288 NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
1289 NLPI.Pair = BBSkipFirstBlockPair();
Chris Lattner2be52e72009-11-27 22:05:15 +00001290 continue;
Chris Lattner2be52e72009-11-27 22:05:15 +00001291 }
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001292 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001293
Chris Lattnerac323292009-11-27 08:37:22 +00001294 // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
1295 CacheInfo = &NonLocalPointerDeps[CacheKey];
Dan Gohman23483932010-09-22 21:41:02 +00001296 Cache = &CacheInfo->NonLocalDeps;
Chris Lattnerac323292009-11-27 08:37:22 +00001297 NumSortedEntries = Cache->size();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001298
Chris Lattnerac323292009-11-27 08:37:22 +00001299 // Since we did phi translation, the "Cache" set won't contain all of the
1300 // results for the query. This is ok (we can still use it to accelerate
1301 // specific block queries) but we can't do the fastpath "return all
1302 // results from the set" Clear out the indicator for this.
Dan Gohman23483932010-09-22 21:41:02 +00001303 CacheInfo->Pair = BBSkipFirstBlockPair();
Chris Lattnerac323292009-11-27 08:37:22 +00001304 SkipFirstBlock = false;
1305 continue;
Chris Lattnerc49f5ac2009-11-26 23:18:49 +00001306
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001307 PredTranslationFailure:
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001308 // The following code is "failure"; we can't produce a sane translation
1309 // for the given block. It assumes that we haven't modified any of
1310 // our datastructures while processing the current block.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001311
Craig Topper9f008862014-04-15 04:59:12 +00001312 if (!Cache) {
Chris Lattner3f4591c2009-01-23 07:12:16 +00001313 // Refresh the CacheInfo/Cache pointer if it got invalidated.
1314 CacheInfo = &NonLocalPointerDeps[CacheKey];
Dan Gohman23483932010-09-22 21:41:02 +00001315 Cache = &CacheInfo->NonLocalDeps;
Chris Lattner3f4591c2009-01-23 07:12:16 +00001316 NumSortedEntries = Cache->size();
Chris Lattner3f4591c2009-01-23 07:12:16 +00001317 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001318
Chris Lattner25bf6f82009-12-19 21:29:22 +00001319 // Since we failed phi translation, the "Cache" set won't contain all of the
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001320 // results for the query. This is ok (we can still use it to accelerate
1321 // specific block queries) but we can't do the fastpath "return all
Chris Lattner25bf6f82009-12-19 21:29:22 +00001322 // results from the set". Clear out the indicator for this.
Dan Gohman23483932010-09-22 21:41:02 +00001323 CacheInfo->Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001324
Eli Friedman7d58bc72011-06-15 00:47:34 +00001325 // If *nothing* works, mark the pointer as unknown.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001326 //
1327 // If this is the magic first block, return this as a clobber of the whole
1328 // incoming value. Since we can't phi translate to one of the predecessors,
1329 // we have to bail out.
1330 if (SkipFirstBlock)
1331 return true;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001332
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001333 for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
1334 assert(I != Cache->rend() && "Didn't find current block??");
Chris Lattner0c315472009-12-09 07:08:01 +00001335 if (I->getBB() != BB)
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001336 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001337
Chris Lattner0c315472009-12-09 07:08:01 +00001338 assert(I->getResult().isNonLocal() &&
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001339 "Should only be here with transparent block");
Eli Friedman7d58bc72011-06-15 00:47:34 +00001340 I->setResult(MemDepResult::getUnknown());
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001341 Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
1342 Pointer.getAddr()));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001343 break;
Chris Lattner7564a3b2008-12-07 02:56:57 +00001344 }
Chris Lattner2faa2c72008-12-07 02:15:47 +00001345 }
Chris Lattner3f4591c2009-01-23 07:12:16 +00001346
Chris Lattnerf903fe12008-12-09 07:47:11 +00001347 // Okay, we're done now. If we added new values to the cache, re-sort it.
Chris Lattner370aada2009-07-13 17:20:05 +00001348 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
Chris Lattnerf09619d2009-01-22 07:04:01 +00001349 DEBUG(AssertSorted(*Cache));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001350 return false;
Chris Lattnera28355d2008-12-07 08:50:20 +00001351}
1352
1353/// RemoveCachedNonLocalPointerDependencies - If P exists in
1354/// CachedNonLocalPointerInfo, remove it.
1355void MemoryDependenceAnalysis::
1356RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001357 CachedNonLocalPointerInfo::iterator It =
Chris Lattnera28355d2008-12-07 08:50:20 +00001358 NonLocalPointerDeps.find(P);
1359 if (It == NonLocalPointerDeps.end()) return;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001360
Chris Lattnera28355d2008-12-07 08:50:20 +00001361 // Remove all of the entries in the BB->val map. This involves removing
1362 // instructions from the reverse map.
Dan Gohman23483932010-09-22 21:41:02 +00001363 NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001364
Chris Lattnera28355d2008-12-07 08:50:20 +00001365 for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
Chris Lattner0c315472009-12-09 07:08:01 +00001366 Instruction *Target = PInfo[i].getResult().getInst();
Craig Topper9f008862014-04-15 04:59:12 +00001367 if (!Target) continue; // Ignore non-local dep results.
Chris Lattner0c315472009-12-09 07:08:01 +00001368 assert(Target->getParent() == PInfo[i].getBB());
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001369
Chris Lattnera28355d2008-12-07 08:50:20 +00001370 // Eliminating the dirty entry from 'Cache', so update the reverse info.
Chris Lattner8eda11b2009-03-29 00:24:04 +00001371 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
Chris Lattnera28355d2008-12-07 08:50:20 +00001372 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001373
Chris Lattnera28355d2008-12-07 08:50:20 +00001374 // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
1375 NonLocalPointerDeps.erase(It);
Chris Lattner2faa2c72008-12-07 02:15:47 +00001376}
1377
1378
Chris Lattnerfa9f99a2008-12-09 22:06:23 +00001379/// invalidateCachedPointerInfo - This method is used to invalidate cached
1380/// information about the specified pointer, because it may be too
1381/// conservative in memdep. This is an optional call that can be used when
1382/// the client detects an equivalence between the pointer and some other
1383/// value and replaces the other value with ptr. This can make Ptr available
 1384 /// in more places than the cached info currently reflects.
1385void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
1386 // If Ptr isn't really a pointer, just ignore it.
Duncan Sands19d0b472010-02-16 11:11:14 +00001387 if (!Ptr->getType()->isPointerTy()) return;
Chris Lattnerfa9f99a2008-12-09 22:06:23 +00001388 // Flush store info for the pointer.
1389 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
1390 // Flush load info for the pointer.
1391 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
1392}
1393
Bob Wilson92cdb6e2010-02-16 19:51:59 +00001394/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
1395/// This needs to be done when the CFG changes, e.g., due to splitting
1396/// critical edges.
1397void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
1398 PredCache->clear();
1399}
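
// Illustrative sketch, not part of the original file: the points at which a
// transform would typically call the two invalidation hooks above. 'MD',
// 'OldPtr', 'NewPtr', and the surrounding steps are assumptions made for the
// example; it is kept under '#if 0' so it is never compiled.
#if 0
static void exampleInvalidation(MemoryDependenceAnalysis &MD,
                                Value *OldPtr, Value *NewPtr) {
  // After proving OldPtr == NewPtr and rewriting uses, cached pointer info
  // may now be overly conservative for NewPtr, so flush it.
  OldPtr->replaceAllUsesWith(NewPtr);
  MD.invalidateCachedPointerInfo(NewPtr);

  // After any CFG edit that changes predecessor lists (e.g. splitting a
  // critical edge), the predecessor cache must be cleared as well.
  MD.invalidateCachedPredecessors();
}
#endif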
1400
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001401/// removeInstruction - Remove an instruction from the dependence analysis,
1402/// updating the dependence of instructions that previously depended on it.
Owen Anderson2b21c3c2007-08-08 22:26:03 +00001403/// This method attempts to keep the cache coherent using the reverse map.
Chris Lattnera25d39522008-11-28 22:04:47 +00001404void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
Chris Lattnera25d39522008-11-28 22:04:47 +00001405 // Walk through the Non-local dependencies, removing this one as the value
1406 // for any cached queries.
Chris Lattner1b810bd2008-11-30 02:28:25 +00001407 NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
1408 if (NLDI != NonLocalDeps.end()) {
Chris Lattner7e61daf2008-12-01 01:15:42 +00001409 NonLocalDepInfo &BlockMap = NLDI->second.first;
Chris Lattnerfc678e22008-11-30 02:30:50 +00001410 for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
1411 DI != DE; ++DI)
Chris Lattner0c315472009-12-09 07:08:01 +00001412 if (Instruction *Inst = DI->getResult().getInst())
Chris Lattnerde4440c2008-12-07 18:39:13 +00001413 RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
Chris Lattner1b810bd2008-11-30 02:28:25 +00001414 NonLocalDeps.erase(NLDI);
1415 }
Owen Anderson086b2c42007-12-08 01:37:09 +00001416
Chris Lattnera25d39522008-11-28 22:04:47 +00001417 // If we have a cached local dependence query for this instruction, remove it.
Chris Lattner73c25452008-11-28 22:28:27 +00001418 //
Chris Lattnerde04e112008-11-29 01:43:36 +00001419 LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
1420 if (LocalDepEntry != LocalDeps.end()) {
Chris Lattnerada1f872008-11-30 01:09:30 +00001421 // Remove us from DepInst's reverse set now that the local dep info is gone.
Chris Lattnerde4440c2008-12-07 18:39:13 +00001422 if (Instruction *Inst = LocalDepEntry->second.getInst())
1423 RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
Chris Lattnerada1f872008-11-30 01:09:30 +00001424
Chris Lattner73c25452008-11-28 22:28:27 +00001425 // Remove this local dependency info.
Chris Lattnerde04e112008-11-29 01:43:36 +00001426 LocalDeps.erase(LocalDepEntry);
Chris Lattnera28355d2008-12-07 08:50:20 +00001427 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001428
Chris Lattnera28355d2008-12-07 08:50:20 +00001429 // If we have any cached pointer dependencies on this instruction, remove
1430 // them. If the instruction has non-pointer type, then it can't be a pointer
1431 // base.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001432
Chris Lattnera28355d2008-12-07 08:50:20 +00001433 // Remove it from both the load info and the store info. The instruction
1434 // can't be in either of these maps if it is non-pointer.
Duncan Sands19d0b472010-02-16 11:11:14 +00001435 if (RemInst->getType()->isPointerTy()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001436 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
1437 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
1438 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001439
Chris Lattnerd3d91112008-11-28 22:51:08 +00001440 // Loop over all of the things that depend on the instruction we're removing.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001441 //
Chris Lattner63bd5862008-11-29 23:30:39 +00001442 SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;
Chris Lattner82b70342008-12-07 18:42:51 +00001443
1444 // If we find RemInst as a clobber or Def in any of the maps for other values,
1445 // we need to replace its entry with a dirty version of the instruction after
1446 // it. If RemInst is a terminator, we use a null dirty value.
1447 //
1448 // Using a dirty version of the instruction after RemInst saves having to scan
1449 // the entire block to get to this point.
1450 MemDepResult NewDirtyVal;
1451 if (!RemInst->isTerminator())
1452 NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001453
Chris Lattner9f1988ab2008-11-29 09:20:15 +00001454 ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
1455 if (ReverseDepIt != ReverseLocalDeps.end()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001456 // RemInst can't be the terminator if it has local stuff depending on it.
Craig Topper46276792014-08-24 23:23:06 +00001457 assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
Chris Lattnerada1f872008-11-30 01:09:30 +00001458 "Nothing can locally depend on a terminator");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001459
Craig Topper46276792014-08-24 23:23:06 +00001460 for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
Chris Lattner1b810bd2008-11-30 02:28:25 +00001461 assert(InstDependingOnRemInst != RemInst &&
1462 "Already removed our local dep info");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001463
Chris Lattner82b70342008-12-07 18:42:51 +00001464 LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001465
Chris Lattnerada1f872008-11-30 01:09:30 +00001466 // Make sure to remember that new things depend on NewDepInst.
Chris Lattner82b70342008-12-07 18:42:51 +00001467 assert(NewDirtyVal.getInst() && "There is no way something else can have "
1468 "a local dep on this if it is a terminator!");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001469 ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
Chris Lattnerada1f872008-11-30 01:09:30 +00001470 InstDependingOnRemInst));
Chris Lattnerd3d91112008-11-28 22:51:08 +00001471 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001472
Chris Lattner63bd5862008-11-29 23:30:39 +00001473 ReverseLocalDeps.erase(ReverseDepIt);
1474
1475 // Add new reverse deps after scanning the set, to avoid invalidating the
1476 // 'ReverseDeps' reference.
1477 while (!ReverseDepsToAdd.empty()) {
1478 ReverseLocalDeps[ReverseDepsToAdd.back().first]
1479 .insert(ReverseDepsToAdd.back().second);
1480 ReverseDepsToAdd.pop_back();
1481 }
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001482 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001483
Chris Lattner9f1988ab2008-11-29 09:20:15 +00001484 ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
1485 if (ReverseDepIt != ReverseNonLocalDeps.end()) {
Craig Topper46276792014-08-24 23:23:06 +00001486 for (Instruction *I : ReverseDepIt->second) {
1487 assert(I != RemInst && "Already removed NonLocalDep info for RemInst");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001488
Craig Topper46276792014-08-24 23:23:06 +00001489 PerInstNLInfo &INLD = NonLocalDeps[I];
Chris Lattner44104272008-11-30 02:52:26 +00001490 // The information is now dirty!
Chris Lattner7e61daf2008-12-01 01:15:42 +00001491 INLD.second = true;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001492
1493 for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
Chris Lattner7e61daf2008-12-01 01:15:42 +00001494 DE = INLD.first.end(); DI != DE; ++DI) {
Chris Lattner0c315472009-12-09 07:08:01 +00001495 if (DI->getResult().getInst() != RemInst) continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001496
Chris Lattner1b810bd2008-11-30 02:28:25 +00001497 // Convert to a dirty entry for the subsequent instruction.
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001498 DI->setResult(NewDirtyVal);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001499
Chris Lattner82b70342008-12-07 18:42:51 +00001500 if (Instruction *NextI = NewDirtyVal.getInst())
Craig Topper46276792014-08-24 23:23:06 +00001501 ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
Chris Lattner1b810bd2008-11-30 02:28:25 +00001502 }
1503 }
Chris Lattner63bd5862008-11-29 23:30:39 +00001504
1505 ReverseNonLocalDeps.erase(ReverseDepIt);
1506
Chris Lattnere7d7e132008-11-29 22:02:15 +00001507 // Add new reverse deps after scanning the set, to avoid invalidating 'Set'
1508 while (!ReverseDepsToAdd.empty()) {
1509 ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
1510 .insert(ReverseDepsToAdd.back().second);
1511 ReverseDepsToAdd.pop_back();
1512 }
Owen Anderson5f208be2007-08-16 21:27:05 +00001513 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001514
Chris Lattnera28355d2008-12-07 08:50:20 +00001515 // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
1516 // value in the NonLocalPointerDeps info.
1517 ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
1518 ReverseNonLocalPtrDeps.find(RemInst);
1519 if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001520 SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001521
Craig Topper46276792014-08-24 23:23:06 +00001522 for (ValueIsLoadPair P : ReversePtrDepIt->second) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001523 assert(P.getPointer() != RemInst &&
1524 "Already removed NonLocalPointerDeps info for RemInst");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001525
Dan Gohman23483932010-09-22 21:41:02 +00001526 NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001527
Chris Lattner5ed409e2008-12-08 07:31:50 +00001528 // The cache is not valid for any specific block anymore.
Dan Gohman23483932010-09-22 21:41:02 +00001529 NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001530
Chris Lattnera28355d2008-12-07 08:50:20 +00001531 // Update any entries for RemInst to use the instruction after it.
1532 for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
1533 DI != DE; ++DI) {
Chris Lattner0c315472009-12-09 07:08:01 +00001534 if (DI->getResult().getInst() != RemInst) continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001535
Chris Lattnera28355d2008-12-07 08:50:20 +00001536 // Convert to a dirty entry for the subsequent instruction.
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001537 DI->setResult(NewDirtyVal);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001538
Chris Lattnera28355d2008-12-07 08:50:20 +00001539 if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
1540 ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
1541 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001542
Chris Lattner3f4591c2009-01-23 07:12:16 +00001543 // Re-sort the NonLocalDepInfo. Changing the dirty entry to its
1544 // subsequent value may invalidate the sortedness.
1545 std::sort(NLPDI.begin(), NLPDI.end());
Chris Lattnera28355d2008-12-07 08:50:20 +00001546 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001547
Chris Lattnera28355d2008-12-07 08:50:20 +00001548 ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001549
Chris Lattnera28355d2008-12-07 08:50:20 +00001550 while (!ReversePtrDepsToAdd.empty()) {
1551 ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
Chris Lattner8eda11b2009-03-29 00:24:04 +00001552 .insert(ReversePtrDepsToAdd.back().second);
Chris Lattnera28355d2008-12-07 08:50:20 +00001553 ReversePtrDepsToAdd.pop_back();
1554 }
1555 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001556
1557
Chris Lattner1b810bd2008-11-30 02:28:25 +00001558 assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
Chris Lattner13cae612008-11-30 19:24:31 +00001559 AA->deleteValue(RemInst);
Jakob Stoklund Olesen087f2072011-01-11 04:05:39 +00001560 DEBUG(verifyRemoved(RemInst));
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001561}
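
// Illustrative sketch, not part of the original pass: the usual client
// protocol around removeInstruction(). The transform tells memdep about the
// removal *before* erasing the instruction, so the reverse maps can be
// updated while the instruction still exists. 'MD' and 'DeadInst' are
// assumptions for the example; it is kept under '#if 0' so it is never
// compiled.
#if 0
static void exampleRemoval(MemoryDependenceAnalysis &MD,
                           Instruction *DeadInst) {
  MD.removeInstruction(DeadInst); // Drop cached deps and fix reverse maps.
  DeadInst->eraseFromParent();    // Only now is it safe to delete the IR.
}
#endif
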
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001562/// verifyRemoved - Verify that the specified instruction does not occur
Craig Topper46276792014-08-24 23:23:06 +00001563/// in our internal data structures. This function verifies by asserting in
1564/// debug builds.
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001565void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
Craig Topper46276792014-08-24 23:23:06 +00001566#ifndef NDEBUG
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001567 for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
1568 E = LocalDeps.end(); I != E; ++I) {
1569 assert(I->first != D && "Inst occurs in data structures");
Chris Lattner47e81d02008-11-30 23:17:19 +00001570 assert(I->second.getInst() != D &&
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001571 "Inst occurs in data structures");
1572 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001573
Chris Lattnera28355d2008-12-07 08:50:20 +00001574 for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
1575 E = NonLocalPointerDeps.end(); I != E; ++I) {
1576 assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
Dan Gohman23483932010-09-22 21:41:02 +00001577 const NonLocalDepInfo &Val = I->second.NonLocalDeps;
Chris Lattnera28355d2008-12-07 08:50:20 +00001578 for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
1579 II != E; ++II)
Chris Lattner0c315472009-12-09 07:08:01 +00001580 assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
Chris Lattnera28355d2008-12-07 08:50:20 +00001581 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001582
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001583 for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
1584 E = NonLocalDeps.end(); I != E; ++I) {
1585 assert(I->first != D && "Inst occurs in data structures");
Chris Lattner44104272008-11-30 02:52:26 +00001586 const PerInstNLInfo &INLD = I->second;
Chris Lattner7e61daf2008-12-01 01:15:42 +00001587 for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
1588 EE = INLD.first.end(); II != EE; ++II)
Chris Lattner0c315472009-12-09 07:08:01 +00001589 assert(II->getResult().getInst() != D && "Inst occurs in data structures");
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001590 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001591
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001592 for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
Chris Lattner1b810bd2008-11-30 02:28:25 +00001593 E = ReverseLocalDeps.end(); I != E; ++I) {
1594 assert(I->first != D && "Inst occurs in data structures");
Craig Topper46276792014-08-24 23:23:06 +00001595 for (Instruction *Inst : I->second)
1596 assert(Inst != D && "Inst occurs in data structures");
Chris Lattner1b810bd2008-11-30 02:28:25 +00001597 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001598
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001599 for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
1600 E = ReverseNonLocalDeps.end();
Chris Lattner1b810bd2008-11-30 02:28:25 +00001601 I != E; ++I) {
1602 assert(I->first != D && "Inst occurs in data structures");
Craig Topper46276792014-08-24 23:23:06 +00001603 for (Instruction *Inst : I->second)
1604 assert(Inst != D && "Inst occurs in data structures");
Chris Lattner1b810bd2008-11-30 02:28:25 +00001605 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001606
Chris Lattnera28355d2008-12-07 08:50:20 +00001607 for (ReverseNonLocalPtrDepTy::const_iterator
1608 I = ReverseNonLocalPtrDeps.begin(),
1609 E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
1610 assert(I->first != D && "Inst occurs in rev NLPD map");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001611
Craig Topper46276792014-08-24 23:23:06 +00001612 for (ValueIsLoadPair P : I->second)
1613 assert(P != ValueIsLoadPair(D, false) &&
1614 P != ValueIsLoadPair(D, true) &&
Chris Lattnera28355d2008-12-07 08:50:20 +00001615 "Inst occurs in ReverseNonLocalPtrDeps map");
1616 }
Craig Topper46276792014-08-24 23:23:06 +00001617#endif
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001618}