//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
          "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
// FIXME: Figure out what a sane value is for this.
//        (500 is relatively insane.)
static const int BlockScanLimit = 500;

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
: FunctionPass(ID), PredCache(0) {
  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}

/// Clean up memory in between runs
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache->clear();
}


/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &) {
  AA = &getAnalysis<AliasAnalysis>();
  TD = getAnalysisIfAvailable<TargetData>();
  DT = getAnalysisIfAvailable<DominatorTree>();
  if (PredCache == 0)
    PredCache.reset(new PredIteratorCache());
  return false;
}

/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
  InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!"); (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// GetLocation - If the given instruction references a specific memory
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
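///
/// For example, given an unordered "load i32* %P" this fills in Loc with %P
/// and the size of an i32 and returns Ref; given a call to free(%P) it fills
/// in Loc with %P (with unknown size) and returns Mod.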
static
AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
                                        AliasAnalysis::Location &Loc,
                                        AliasAnalysis *AA) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::Ref;
    } else if (LI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::Mod;
    } else if (SI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = AA->getLocation(V);
    return AliasAnalysis::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst)) {
    // calls to free() deallocate the entire structure
    Loc = AliasAnalysis::Location(CI->getArgOperand(0));
    return AliasAnalysis::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = AliasAnalysis::Location(II->getArgOperand(1),
                                    cast<ConstantInt>(II->getArgOperand(0))
                                      ->getZExtValue(),
                                    II->getMetadata(LLVMContext::MD_tbaa));
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    case Intrinsic::invariant_end:
      Loc = AliasAnalysis::Location(II->getArgOperand(2),
                                    cast<ConstantInt>(II->getArgOperand(1))
                                      ->getZExtValue(),
                                    II->getMetadata(LLVMContext::MD_tbaa));
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    default:
      break;
    }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return AliasAnalysis::ModRef;
  if (Inst->mayReadFromMemory())
    return AliasAnalysis::Ref;
  return AliasAnalysis::NoModRef;
}

/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
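///
/// For example, when a readonly call is queried and an identical readonly
/// call is found earlier in the block with nothing that writes memory in
/// between, the query gets a Def result on the earlier call, letting clients
/// treat the later call as redundant.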
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed
    AliasAnalysis::Location Loc;
    AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (CallSite InstCS = cast<Value>(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst)) continue;
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case AliasAnalysis::NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        break;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

/// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that
/// would fully overlap MemLoc if done as a wider legal integer load.
///
/// MemLocBase, MemLocOffset are lazily computed here the first time the
/// base/offs of memloc is needed.
static bool
isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
                                       const Value *&MemLocBase,
                                       int64_t &MemLocOffs,
                                       const LoadInst *LI,
                                       const TargetData *TD) {
  // If we have no target data, we can't do this.
  if (TD == 0) return false;

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (MemLocBase == 0)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, *TD);

  unsigned Size = MemoryDependenceAnalysis::
    getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
                                    LI, *TD);
  return Size != 0;
}

/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
/// looks at a memory location for a load (specified by MemLocBase, Offs,
/// and Size) and compares it against a load.  If the specified load could
/// be safely widened to a larger integer load that is 1) still efficient,
/// 2) safe for the target, and 3) would provide the specified memory
/// location value, then this function returns the size in bytes of the
/// load width to use.  If not, this returns zero.
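///
/// For example, given an i8 load of P known to be 4-byte aligned and a
/// queried location of 1 byte at P+3, the candidate width grows from 2 to 4
/// bytes; a 4-byte load at P covers P+3, so 4 is returned (assuming 16- and
/// 32-bit integers are legal for the target).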
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
                                unsigned MemLocSize, const LoadInst *LI,
                                const TargetData &TD) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, TD);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase) return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs) return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs+MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs+LoadAlign < MemLocEnd) return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (1) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !TD.fitsInLegalInteger(NewLoadByteSize*8))
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs+NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }

  return 0;
}

namespace {
  /// Only find pointer captures which happen before the given instruction. Uses
  /// the dominator tree to determine whether one instruction is before another.
  struct CapturesBefore {
    CapturesBefore(const Instruction *I, DominatorTree *DT)
      : BeforeHere(I), DT(DT), Captured(false) {}

    void tooManyUses() { Captured = true; }

    bool shouldExplore(Use *U) {
      Instruction *I = cast<Instruction>(U->getUser());
      if (BeforeHere != I && DT->dominates(BeforeHere, I))
        return false;
      return true;
    }

    bool captured(Instruction *I) {
      if (BeforeHere != I && DT->dominates(BeforeHere, I))
        return false;
      Captured = true;
      return true;
    }

    const Instruction *BeforeHere;
    DominatorTree *DT;

    bool Captured;
  };
}

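/// getModRefInfo - A wrapper around AliasAnalysis::getModRefInfo that tries
/// to refine a conservative ModRef answer for a call: if the queried location
/// is based on a non-escaping identified local object that is not captured
/// before the call and does not alias any of the call's nocapture/byval
/// pointer arguments, the call cannot touch it and NoModRef is returned
/// instead.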
AliasAnalysis::ModRefResult
MemoryDependenceAnalysis::getModRefInfo(const Instruction *Inst,
                                        const AliasAnalysis::Location &MemLoc) {
  AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
  if (MR != AliasAnalysis::ModRef) return MR;

  // FIXME: this is really just shoring-up a deficiency in alias analysis.
  // BasicAA isn't willing to spend linear time determining whether an alloca
  // was captured before or after this particular call, while we are. However,
  // with a smarter AA in place, this test is just wasting compile time.
  if (!DT) return AliasAnalysis::ModRef;
  const Value *Object = GetUnderlyingObject(MemLoc.Ptr, TD);
  if (!isIdentifiedObject(Object) || isa<GlobalVariable>(Object))
    return AliasAnalysis::ModRef;
  ImmutableCallSite CS(Inst);
  if (!CS.getInstruction()) return AliasAnalysis::ModRef;

  CapturesBefore CB(Inst, DT);
  llvm::PointerMayBeCaptured(Object, CB);

  if (isa<Constant>(Object) || CS.getInstruction() == Object || CB.Captured)
    return AliasAnalysis::ModRef;

  unsigned ArgNo = 0;
  for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments.  If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() ||
        (!CS.paramHasAttr(ArgNo+1, Attribute::NoCapture) &&
         !CS.paramHasAttr(ArgNo+1, Attribute::ByVal)))
      continue;

    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking.  If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (!AA->isNoAlias(AliasAnalysis::Location(*CI),
                       AliasAnalysis::Location(Object))) {
      return AliasAnalysis::ModRef;
    }
  }
  return AliasAnalysis::NoModRef;
}

/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases with
/// read-only operations.  If isLoad is false, this routine ignores may-aliases
/// with reads from read-only locations.
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
                         BasicBlock::iterator ScanIt, BasicBlock *BB) {

  const Value *MemLocBase = 0;
  int64_t MemLocOffset = 0;

  unsigned Limit = BlockScanLimit;

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // Debug intrinsics don't (and can't) cause dependences.
      if (isa<DbgInfoIntrinsic>(II)) continue;

      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
                            MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means that
    // a load depends on another must aliased load from the same value.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // Atomic loads have complications involved.
      // FIXME: This is overly conservative.
      if (!LI->isUnordered())
        return MemDepResult::getClobber(LI);

      AliasAnalysis::Location LoadLoc = AA->getLocation(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasAnalysis::NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1).  If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
            if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI, TD))
              return MemDepResult::getClobber(Inst);

          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == AliasAnalysis::MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      //  in terms of clobbering loads, but since it does this by looking
      //  at the clobbering load directly, it doesn't know about any
      //  phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasAnalysis::PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Random may-alias loads don't depend on each other without a
        // dependence.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == AliasAnalysis::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA->pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered())
        return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      AliasAnalysis::Location StoreLoc = AA->getLocation(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);

      if (R == AliasAnalysis::NoAlias)
        continue;
      if (R == AliasAnalysis::MustAlias)
        return MemDepResult::getDef(Inst);
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call, not
    // a subsequent bitcast of the malloc call result.  There can be stores to
    // the malloced memory between the malloc call and its bitcast uses, and we
    // need to continue scanning until the malloc call.
    if (isa<AllocaInst>(Inst) ||
        (isa<CallInst>(Inst) && extractMallocCall(Inst))) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);

      if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
      continue;
    }

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    switch (getModRefInfo(Inst, MemLoc)) {
    case AliasAnalysis::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case AliasAnalysis::Mod:
      return MemDepResult::getClobber(Inst);
    case AliasAnalysis::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

/// getDependency - Return the instruction on which a memory operation
/// depends.
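///
/// A typical client query looks like (illustrative):
///   MemDepResult Res = MD->getDependency(Inst);
///   if (Instruction *DepInst = Res.getInst())
///     ... use DepInst as the local def/clobber ...
///   else if (Res.isNonLocal())
///     ... fall back to the non-local query interfaces below ...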
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    AliasAnalysis::Location MemLoc;
    AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & AliasAnalysis::Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
                                            QueryParent);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
                                             QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}

#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1) Count = Cache.size();
  if (Count == 0) return;

  for (unsigned i = 1; i != unsigned(Count); ++i)
    assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif

/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
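///
/// A typical use looks like (illustrative):
///   const NonLocalDepInfo &Deps = MD->getNonLocalCallDependency(CallSite(C));
///   for (unsigned i = 0, e = Deps.size(); i != e; ++i)
///     ... Deps[i].getBB() and Deps[i].getResult() ...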
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc. In
  /// the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
         I != E; ++I)
      if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB))
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && prior(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = 0;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't have
    // to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it is
      // a clobber, otherwise it is unknown.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }

  return Cache;
}

/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
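/// A typical use from a client such as GVN looks like (illustrative):
///   SmallVector<NonLocalDepResult, 64> Deps;
///   MD->getNonLocalPointerDependency(AA->getLocation(LI), true,
///                                    LI->getParent(), Deps);
///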
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
                             BasicBlock *FromBB,
                             SmallVectorImpl<NonLocalDepResult> &Result) {
  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
  DenseMap<BasicBlock*, Value*> Visited;
  if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
                                   Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB,
                                     MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}

/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available).  If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
                        bool isLoad, BasicBlock *BB,
                        NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  // Do a binary search to see if we already have an entry for this block in
  // the cache set.  If so, find it.
  NonLocalDepInfo::iterator Entry =
    std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
                     NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = 0;
  if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value.  If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);

  // If we had a dirty entry for the block, update it.  Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// SortNonLocalDepInfoCache - Sort a NonLocalDepInfo cache, given a certain
/// number of elements in the array that are already properly ordered.  This is
/// optimized for the case when only a few entries are added.
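///
/// With at most two unsorted entries this performs one or two binary-search
/// insertions rather than re-sorting the entire cache; with more it falls
/// back to a full std::sort.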
static void
SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.end()-1, Val);
    Cache.insert(Entry, Val);
    // FALL THROUGH.
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}

/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointeesize starting at the end of StartBB.  Add any clobber/def
/// results to the results vector and keep track of which blocks are visited in
/// 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true).  In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason.  This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceAnalysis::
getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
                            const AliasAnalysis::Location &Loc,
                            bool isLoad, BasicBlock *StartBB,
                            SmallVectorImpl<NonLocalDepResult> &Result,
                            DenseMap<BasicBlock*, Value*> &Visited,
                            bool SkipFirstBlock) {

  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // tbaa tag are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.TBAATag = Loc.TBAATag;

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
    NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size < Loc.Size) {
      // The query's Size is greater than the cached one. Throw out the
      // cached data and proceed with the query at the greater size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
           DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
        if (Instruction *Inst = DI->getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    } else if (CacheInfo->Size > Loc.Size) {
      // This query's Size is less than the cached one. Conservatively restart
      // the query using the greater size.
      return getNonLocalPointerDepFromBB(Pointer,
                                         Loc.getWithNewSize(CacheInfo->Size),
                                         isLoad, StartBB, Result, Visited,
                                         SkipFirstBlock);
    }

    // If the query's TBAATag is inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->TBAATag != Loc.TBAATag) {
      if (CacheInfo->TBAATag) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->TBAATag = 0;
        for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
             DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
          if (Instruction *Inst = DI->getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
      }
      if (Loc.TBAATag)
        return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutTBAATag(),
                                           isLoad, StartBB, Result, Visited,
                                           SkipFirstBlock);
    }
  }

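  // At this point the cached Size and TBAATag (if any) are consistent with
  // Loc, so any entries remaining in the cache also apply to this query.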
  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // We have a fully cached result for this query, so we can just return the
    // cached results and populate the visited set.  However, we have to verify
    // that we don't already have conflicting results for these blocks.  Check
    // to ensure that if a block in the results set is in the visited set, it
    // was for the same pointer query.
    if (!Visited.empty()) {
      for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
           I != E; ++I) {
        DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block.  Just return clobber, saying
        // that something was clobbered in this result.  We could also do a
        // non-fully cached query, but there is little point in doing this.
        return true;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
         I != E; ++I) {
      Visited.insert(std::make_pair(I->getBB(), Addr));
      if (!I->getResult().isNonLocal())
        Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
    }
    ++NumCacheCompleteNonLocalPtr;
    return false;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer, or one that we're about to invalidate by putting more info into
  // it than its valid cache info.  If the cache is empty, the result will be
  // valid cache info; otherwise it won't be.
  if (Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();

  SmallVector<BasicBlock*, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted.  Previously cached
  // entries will all be sorted.  The entries we add we only sort on demand (we
  // don't insert every element into its sorted position).  We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  DEBUG(AssertSorted(*Cache));

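  // Walk the CFG backwards from StartBB.  Each block pulled off the worklist
  // either yields a cached or freshly computed local answer, or has its
  // predecessors queued (PHI-translating the pointer across the edge when the
  // pointer is defined in the current block).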
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB.  See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB.  If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
                                                 NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
        continue;
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock*, 16> NewBlocks;
      for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
          InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          NewBlocks.push_back(*PI);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation.  If we know ahead of time that we
    // can't phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the cache
    // value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
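    // Null out Cache here: the recursive getNonLocalPointerDepFromBB calls
    // below may grow NonLocalPointerDeps and invalidate pointers into it; the
    // pointer is re-fetched after those calls complete.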
    Cache = 0;

    PredList.clear();
    for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
      BasicBlock *Pred = *PI;
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor.  This can fail if
      // not translatable, in which case the getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, 0);

      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer.  If so, we can't do this lookup.  This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
        InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer.  We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < PredList.size(); i++)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to avoid
    // calling getNonLocalPointerDepFromBB for blocks we don't want to return
    // any results for.  (getNonLocalPointerDepFromBB will modify our
    // data structures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
    for (unsigned i = 0; i < PredList.size(); i++) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor.  We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (PredPtrVal == 0)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value.  Consider PHI translating something like:
      // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          getNonLocalPointerDepFromBB(PredPointer,
                                      Loc.getWithNewPtr(PredPtrVal),
                                      isLoad, Pred,
                                      Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy future
        // queries.  Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null.  This makes later reuse of the
        // cached value do more work, but ensures it doesn't miss the phi
        // translation failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block.  It assumes that we haven't modified any of
    // our data structures while processing the current block.

    if (Cache == 0) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value.  Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return true;

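    // Otherwise, find the cached entry for this block, downgrade it from
    // NonLocal to Unknown, and report it in the result list.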
    for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
      assert(I != Cache->rend() && "Didn't find current block??");
      if (I->getBB() != BB)
        continue;

      assert(I->getResult().isNonLocal() &&
             "Should only be here with transparent block");
      I->setResult(MemDepResult::getUnknown());
      Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
                                         Pointer.getAddr()));
      break;
    }
  }

  // Okay, we're done now.  If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return false;
}

/// RemoveCachedNonLocalPointerDependencies - If P exists in
/// CachedNonLocalPointerInfo, remove it.
void MemoryDependenceAnalysis::
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It =
    NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end()) return;

  // Remove all of the entries in the BB->val map.  This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (Target == 0) continue;  // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}


/// invalidateCachedPointerInfo - This method is used to invalidate cached
/// information about the specified pointer, because it may be too
/// conservative in memdep.  This is an optional call that can be used when
/// the client detects an equivalence between the pointer and some other
/// value and replaces the other value with Ptr.  This can make Ptr available
/// in more places than the cached info reflects.
void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy()) return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}

/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
/// This needs to be done when the CFG changes, e.g., due to splitting
/// critical edges.
void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
  PredCache->clear();
}

/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
         DI != DE; ++DI)
      if (Instruction *Inst = DI->getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove it.
  //
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a pointer
  // base.

  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }

  // Loop over all of the things that depend on the instruction we're removing.
  //
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it.  If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
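  // (++BasicBlock::iterator(RemInst) is the instruction immediately following
  // RemInst in its parent block.)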

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
         E = ReverseDeps.end(); I != E; ++I) {
      Instruction *InstDependingOnRemInst = *I;
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDirtyVal.getInst().
      assert(NewDirtyVal.getInst() && "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
                                                InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I) {
      assert(*I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[*I];
      // The information is now dirty!
      INLD.second = true;

      for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
           DE = INLD.first.end(); DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
    ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallPtrSet<ValueIsLoadPair, 4> &Set = ReversePtrDepIt->second;
    SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;

    for (SmallPtrSet<ValueIsLoadPair, 4>::iterator I = Set.begin(),
         E = Set.end(); I != E; ++I) {
      ValueIsLoadPair P = *I;
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
           DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
        .insert(ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }


  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  AA->deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}

/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
       E = LocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    assert(I->second.getInst() != D &&
           "Inst occurs in data structures");
  }

  for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
       E = NonLocalPointerDeps.end(); I != E; ++I) {
    assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
    const NonLocalDepInfo &Val = I->second.NonLocalDeps;
    for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
         II != E; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
       E = NonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = I->second;
    for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
         EE = INLD.first.end(); II != EE; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
       E = ReverseLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
       E = ReverseNonLocalDeps.end();
       I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseNonLocalPtrDepTy::const_iterator
       I = ReverseNonLocalPtrDeps.begin(),
       E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in rev NLPD map");

    for (SmallPtrSet<ValueIsLoadPair, 4>::const_iterator II = I->second.begin(),
         E = I->second.end(); II != E; ++II)
      assert(*II != ValueIsLoadPair(D, false) &&
             *II != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }

}