//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionTracker.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
          "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static const unsigned int BlockScanLimit = 100;

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionTracker)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
    : FunctionPass(ID), PredCache() {
  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}

/// Clean up memory in between runs
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache->clear();
}



/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionTracker>();
  AU.addRequiredTransitive<AliasAnalysis>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &) {
  AA = &getAnalysis<AliasAnalysis>();
  AT = &getAnalysis<AssumptionTracker>();
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : nullptr;
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  if (!PredCache)
    PredCache.reset(new PredIteratorCache());
  return false;
}

/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
  InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!"); (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// GetLocation - If the given instruction references a specific memory
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
static
AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
                                        AliasAnalysis::Location &Loc,
                                        AliasAnalysis *AA) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::Ref;
    }
    if (LI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::Mod;
    }
    if (SI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = AA->getLocation(V);
    return AliasAnalysis::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
    // calls to free() deallocate the entire structure
    Loc = AliasAnalysis::Location(CI->getArgOperand(0));
    return AliasAnalysis::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    AAMDNodes AAInfo;

    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      II->getAAMetadata(AAInfo);
      Loc = AliasAnalysis::Location(II->getArgOperand(1),
                                    cast<ConstantInt>(II->getArgOperand(0))
                                        ->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    case Intrinsic::invariant_end:
      II->getAAMetadata(AAInfo);
      Loc = AliasAnalysis::Location(II->getArgOperand(2),
                                    cast<ConstantInt>(II->getArgOperand(1))
                                        ->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return AliasAnalysis::ModRef;
  if (Inst->mayReadFromMemory())
    return AliasAnalysis::Ref;
  return AliasAnalysis::NoModRef;
}
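
// For intuition, a few representative cases (illustrative, not exhaustive):
// a simple "load i32* %p" fills Loc with %p and the load's size and returns
// Ref; a simple "store i32 0, i32* %p" fills the same location and returns
// Mod; a call recognized by isFreeCall() reports its pointer argument as Mod;
// anything else falls back to the conservative mayWriteToMemory() /
// mayReadFromMemory() classification with a null Loc.Ptr.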

/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed
    AliasAnalysis::Location Loc;
    AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (CallSite InstCS = cast<Value>(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst)) continue;
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case AliasAnalysis::NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        continue;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (MR != AliasAnalysis::NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

/// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that
/// would fully overlap MemLoc if done as a wider legal integer load.
///
/// MemLocBase, MemLocOffset are lazily computed here the first time the
/// base/offs of memloc is needed.
static bool
isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
                                       const Value *&MemLocBase,
                                       int64_t &MemLocOffs,
                                       const LoadInst *LI,
                                       const DataLayout *DL) {
  // If we have no target data, we can't do this.
  if (!DL) return false;

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (!MemLocBase)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);

  unsigned Size = MemoryDependenceAnalysis::
    getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
                                    LI, *DL);
  return Size != 0;
}

/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
/// looks at a memory location for a load (specified by MemLocBase, Offs,
/// and Size) and compares it against a load.  If the specified load could
/// be safely widened to a larger integer load that is 1) still efficient,
/// 2) safe for the target, and 3) would provide the specified memory
/// location value, then this function returns the size in bytes of the
/// load width to use.  If not, this returns zero.
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
                                unsigned MemLocSize, const LoadInst *LI,
                                const DataLayout &DL) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->getAttributes().
      hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeThread))
    return 0;

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &DL);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase) return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs) return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs+MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs+LoadAlign < MemLocEnd) return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (1) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !DL.fitsInLegalInteger(NewLoadByteSize*8))
      return 0;

    if (LIOffs+NewLoadByteSize > MemLocEnd &&
        LI->getParent()->getParent()->getAttributes().
          hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeAddress))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings. So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs+NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}
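
// Worked example (illustrative values only): if MemLoc is a 1-byte access at
// P+3 and LI is "load i8* P, align 4", then LIOffs == 0, MemLocOffs == 3,
// MemLocEnd == 4 and LoadAlign == 4.  NewLoadByteSize starts at 1, becomes 2
// via NextPowerOf2, does not yet cover MemLoc, doubles to 4, and (assuming a
// 32-bit integer is legal for the target) the function returns 4: the i8 load
// may be widened to an i32 that covers the queried byte.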

/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases with
/// read-only operations.  If isLoad is false, this routine ignores may-aliases
/// with reads from read-only locations.  If possible, pass the query
/// instruction as well; this function may take advantage of the metadata
/// annotated to the query instruction to refine the result.
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
                         BasicBlock::iterator ScanIt, BasicBlock *BB,
                         Instruction *QueryInst) {

  const Value *MemLocBase = nullptr;
  int64_t MemLocOffset = 0;
  unsigned Limit = BlockScanLimit;
  bool isInvariantLoad = false;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimisation of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.
  bool HasSeenAcquire = false;

  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
      isInvariantLoad = true;
  }

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II)) continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
                            MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means that
    // a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that it
    // does not alias with when this atomic load indicates that another thread may
    // be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not atomic.
      // An Acquire (or higher) load sets the HasSeenAcquire flag, so that any
      // release store will know to return getClobber.
      // FIXME: This is overly conservative.
      if (!LI->isUnordered()) {
        if (!QueryInst)
          return MemDepResult::getClobber(LI);
        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
          if (!QueryLI->isSimple())
            return MemDepResult::getClobber(LI);
        } else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
          if (!QuerySI->isSimple())
            return MemDepResult::getClobber(LI);
        } else if (QueryInst->mayReadOrWriteMemory()) {
          return MemDepResult::getClobber(LI);
        }

        if (isAtLeastAcquire(LI->getOrdering()))
          HasSeenAcquire = true;
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to clobber
      // non-aliasing locations, as normal accesses can for example be reordered
      // with volatile accesses.
      if (LI->isVolatile())
        return MemDepResult::getClobber(LI);

      AliasAnalysis::Location LoadLoc = AA->getLocation(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasAnalysis::NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1).  If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
            if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI, DL))
              return MemDepResult::getClobber(Inst);

          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == AliasAnalysis::MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasAnalysis::PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Random may-alias loads don't depend on each other without a
        // dependence.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == AliasAnalysis::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA->pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // A Release (or higher) store further requires that no acquire load
      // has been seen.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered()) {
        if (!QueryInst)
          return MemDepResult::getClobber(SI);
        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
          if (!QueryLI->isSimple())
            return MemDepResult::getClobber(SI);
        } else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
          if (!QuerySI->isSimple())
            return MemDepResult::getClobber(SI);
        } else if (QueryInst->mayReadOrWriteMemory()) {
          return MemDepResult::getClobber(SI);
        }

        if (HasSeenAcquire && isAtLeastRelease(SI->getOrdering()))
          return MemDepResult::getClobber(SI);
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to clobber
      // non-aliasing locations, as normal accesses can for example be reordered
      // with volatile accesses.
      if (SI->isVolatile())
        return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      AliasAnalysis::Location StoreLoc = AA->getLocation(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);

      if (R == AliasAnalysis::NoAlias)
        continue;
      if (R == AliasAnalysis::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call, not
    // a subsequent bitcast of the malloc call result.  There can be stores to
    // the malloced memory between the malloc call and its bitcast uses, and we
    // need to continue scanning until the malloc call.
    const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);

      if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
      // Be conservative if the accessed pointer may alias the allocation.
      if (AA->alias(Inst, AccessPtr) != AliasAnalysis::NoAlias)
        return MemDepResult::getClobber(Inst);
      // If the allocation is not aliased and does not read memory (like
      // strdup), it is safe to ignore.
      if (isa<AllocaInst>(Inst) ||
          isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
        continue;
    }

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (MR == AliasAnalysis::ModRef)
      MR = AA->callCapturesBefore(Inst, MemLoc, DT);
    switch (MR) {
    case AliasAnalysis::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case AliasAnalysis::Mod:
      return MemDepResult::getClobber(Inst);
    case AliasAnalysis::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    AliasAnalysis::Location MemLoc;
    AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & AliasAnalysis::Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
                                            QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
                                             QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
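
// A rough sketch of how a client typically consumes this result (hypothetical
// caller code, not part of this analysis):
//
//   MemDepResult Dep = MD.getDependency(Inst);
//   if (Dep.isDef() || Dep.isClobber())
//     handleLocalDep(Dep.getInst());   // dependency within Inst's own block
//   else if (Dep.isNonLocal())
//     ;                                // use the non-local query APIs below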

#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1) Count = Cache.size();
  if (Count == 0) return;

  for (unsigned i = 1; i != unsigned(Count); ++i)
    assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif

/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc. In
  /// the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
         I != E; ++I)
      if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB))
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't have
    // to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it is
      // unknown, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }

  return Cache;
}

/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
                             BasicBlock *FromBB,
                             SmallVectorImpl<NonLocalDepResult> &Result) {
  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, AT);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
  DenseMap<BasicBlock*, Value*> Visited;
  if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
                                   Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB,
                                     MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}

/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available).  If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
                        bool isLoad, BasicBlock *BB,
                        NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  // Do a binary search to see if we already have an entry for this block in
  // the cache set.  If so, find it.
  NonLocalDepInfo::iterator Entry =
    std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
                     NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value.  If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);

  // If we had a dirty entry for the block, update it.  Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

Chris Lattnerf903fe12008-12-09 07:47:11 +0000946 // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
947 // update MemDep when we remove instructions.
948 Instruction *Inst = Dep.getInst();
949 assert(Inst && "Didn't depend on anything?");
Dan Gohman23483932010-09-22 21:41:02 +0000950 ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
Chris Lattner8eda11b2009-03-29 00:24:04 +0000951 ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
Chris Lattnerf903fe12008-12-09 07:47:11 +0000952 return Dep;
953}
954
Robin Morisset039781e2014-08-29 21:53:01 +0000955/// SortNonLocalDepInfoCache - Sort the NonLocalDepInfo cache, given a certain
Chris Lattner370aada2009-07-13 17:20:05 +0000956/// number of elements in the array that are already properly ordered. This is
957/// optimized for the case when only a few entries are added.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000958static void
Chris Lattner370aada2009-07-13 17:20:05 +0000959SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
960 unsigned NumSortedEntries) {
961 switch (Cache.size() - NumSortedEntries) {
962 case 0:
963 // done, no new entries.
964 break;
965 case 2: {
966 // Two new entries, insert the last one into place.
Chris Lattner0c315472009-12-09 07:08:01 +0000967 NonLocalDepEntry Val = Cache.back();
Chris Lattner370aada2009-07-13 17:20:05 +0000968 Cache.pop_back();
969 MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
970 std::upper_bound(Cache.begin(), Cache.end()-1, Val);
971 Cache.insert(Entry, Val);
972 // FALL THROUGH.
973 }
974 case 1:
975 // One new entry: just insert the new value at the appropriate position.
976 if (Cache.size() != 1) {
Chris Lattner0c315472009-12-09 07:08:01 +0000977 NonLocalDepEntry Val = Cache.back();
Chris Lattner370aada2009-07-13 17:20:05 +0000978 Cache.pop_back();
979 MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
980 std::upper_bound(Cache.begin(), Cache.end(), Val);
981 Cache.insert(Entry, Val);
982 }
983 break;
984 default:
985 // Added many values, do a full scale sort.
986 std::sort(Cache.begin(), Cache.end());
987 break;
988 }
989}
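
// Illustrative sketch (not part of this file): the expected calling pattern
// is to note how many entries are already sorted, append new entries at the
// end, and then restore the invariant in a single call. BB1/BB2 and
// Dep1/Dep2 below are placeholder names.
//
//   unsigned NumSortedEntries = Cache->size();
//   Cache->push_back(NonLocalDepEntry(BB1, Dep1));
//   Cache->push_back(NonLocalDepEntry(BB2, Dep2));
//   SortNonLocalDepInfoCache(*Cache, NumSortedEntries); // two-entry fast path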
990
Chris Lattnerff9f3db2008-12-15 03:35:32 +0000991/// getNonLocalPointerDepFromBB - Perform a dependency query based on
992/// pointer/pointeesize starting at the end of StartBB. Add any clobber/def
993/// results to the results vector and keep track of which blocks are visited in
994/// 'Visited'.
995///
996/// This has special behavior for the first block queries (when SkipFirstBlock
997/// is true). In this special case, it ignores the contents of the specified
998/// block and starts returning dependence info for its predecessors.
999///
1000/// This function returns false on success, or true to indicate that it could
1001/// not compute dependence information for some reason. This should be treated
1002/// as a clobber dependence on the first instruction in the predecessor block.
1003bool MemoryDependenceAnalysis::
Dan Gohman23483932010-09-22 21:41:02 +00001004getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
1005 const AliasAnalysis::Location &Loc,
Chris Lattnerf903fe12008-12-09 07:47:11 +00001006 bool isLoad, BasicBlock *StartBB,
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001007 SmallVectorImpl<NonLocalDepResult> &Result,
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001008 DenseMap<BasicBlock*, Value*> &Visited,
1009 bool SkipFirstBlock) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001010 // Look up the cached info for Pointer.
Chris Lattner972e6d82009-12-09 01:59:31 +00001011 ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
Dan Gohman23483932010-09-22 21:41:02 +00001012
Dan Gohman0a6021a2010-11-10 20:37:15 +00001013 // Set up a temporary NLPI value. If the map doesn't yet have an entry for
1014 // CacheKey, this value will be inserted as the associated value. Otherwise,
1015 // it'll be ignored, and we'll have to check to see if the cached size and
Hal Finkelcc39b672014-07-24 12:16:19 +00001016 // aa tags are consistent with the current query.
Dan Gohman0a6021a2010-11-10 20:37:15 +00001017 NonLocalPointerInfo InitialNLPI;
1018 InitialNLPI.Size = Loc.Size;
Hal Finkelcc39b672014-07-24 12:16:19 +00001019 InitialNLPI.AATags = Loc.AATags;
Dan Gohman0a6021a2010-11-10 20:37:15 +00001020
1021 // Get the NLPI for CacheKey, inserting one into the map if it doesn't
1022 // already have one.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001023 std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
Dan Gohman0a6021a2010-11-10 20:37:15 +00001024 NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
1025 NonLocalPointerInfo *CacheInfo = &Pair.first->second;
1026
Dan Gohman2e8ca442010-11-10 21:45:11 +00001027 // If we already have a cache entry for this CacheKey, we may need to do some
1028 // work to reconcile the cache entry and the current query.
Dan Gohman0a6021a2010-11-10 20:37:15 +00001029 if (!Pair.second) {
Dan Gohman2e8ca442010-11-10 21:45:11 +00001030 if (CacheInfo->Size < Loc.Size) {
1031 // The query's Size is greater than the cached one. Throw out the
Benjamin Kramerbde91762012-06-02 10:20:22 +00001032 // cached data and proceed with the query at the greater size.
Dan Gohman2e8ca442010-11-10 21:45:11 +00001033 CacheInfo->Pair = BBSkipFirstBlockPair();
1034 CacheInfo->Size = Loc.Size;
Dan Gohman67919362010-11-10 22:35:02 +00001035 for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
1036 DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
1037 if (Instruction *Inst = DI->getResult().getInst())
1038 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
Dan Gohman2e8ca442010-11-10 21:45:11 +00001039 CacheInfo->NonLocalDeps.clear();
1040 } else if (CacheInfo->Size > Loc.Size) {
1041 // This query's Size is less than the cached one. Conservatively restart
1042 // the query using the greater size.
Dan Gohman0a6021a2010-11-10 20:37:15 +00001043 return getNonLocalPointerDepFromBB(Pointer,
1044 Loc.getWithNewSize(CacheInfo->Size),
1045 isLoad, StartBB, Result, Visited,
1046 SkipFirstBlock);
1047 }
1048
Hal Finkelcc39b672014-07-24 12:16:19 +00001049 // If the query's AATags are inconsistent with the cached one,
Dan Gohman2e8ca442010-11-10 21:45:11 +00001050 // conservatively throw out the cached data and restart the query with
1051 // no tag if needed.
Hal Finkelcc39b672014-07-24 12:16:19 +00001052 if (CacheInfo->AATags != Loc.AATags) {
1053 if (CacheInfo->AATags) {
Dan Gohman2e8ca442010-11-10 21:45:11 +00001054 CacheInfo->Pair = BBSkipFirstBlockPair();
Hal Finkelcc39b672014-07-24 12:16:19 +00001055 CacheInfo->AATags = AAMDNodes();
Dan Gohman67919362010-11-10 22:35:02 +00001056 for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
1057 DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
1058 if (Instruction *Inst = DI->getResult().getInst())
1059 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
Dan Gohman2e8ca442010-11-10 21:45:11 +00001060 CacheInfo->NonLocalDeps.clear();
1061 }
Hal Finkelcc39b672014-07-24 12:16:19 +00001062 if (Loc.AATags)
1063 return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutAATags(),
Dan Gohman2e8ca442010-11-10 21:45:11 +00001064 isLoad, StartBB, Result, Visited,
1065 SkipFirstBlock);
Dan Gohman0a6021a2010-11-10 20:37:15 +00001066 }
Dan Gohman23483932010-09-22 21:41:02 +00001067 }
1068
1069 NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
Chris Lattner5ed409e2008-12-08 07:31:50 +00001070
1071 // If we have valid cached information for exactly the block we are
1072 // investigating, just return it with no recomputation.
Dan Gohman23483932010-09-22 21:41:02 +00001073 if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
Chris Lattner8b4be372008-12-16 07:10:09 +00001074 // We have a fully cached result for this query, so we can just return the
1075 // cached results and populate the visited set. However, we have to verify
1076 // that we don't already have conflicting results for these blocks. Check
1077 // to ensure that if a block in the results set is in the visited set that
1078 // it was for the same pointer query.
1079 if (!Visited.empty()) {
1080 for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
1081 I != E; ++I) {
Chris Lattner0c315472009-12-09 07:08:01 +00001082 DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
Chris Lattner972e6d82009-12-09 01:59:31 +00001083 if (VI == Visited.end() || VI->second == Pointer.getAddr())
1084 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001085
Chris Lattner8b4be372008-12-16 07:10:09 +00001086 // We have a pointer mismatch in a block. Just return clobber, saying
1087 // that something was clobbered in this result. We could also do a
1088 // non-fully cached query, but there is little point in doing this.
1089 return true;
1090 }
1091 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001092
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001093 Value *Addr = Pointer.getAddr();
Chris Lattner5ed409e2008-12-08 07:31:50 +00001094 for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
Chris Lattner8b4be372008-12-16 07:10:09 +00001095 I != E; ++I) {
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001096 Visited.insert(std::make_pair(I->getBB(), Addr));
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001097 if (I->getResult().isNonLocal()) {
1098 continue;
1099 }
1100
1101 if (!DT) {
1102 Result.push_back(NonLocalDepResult(I->getBB(),
1103 MemDepResult::getUnknown(),
1104 Addr));
1105 } else if (DT->isReachableFromEntry(I->getBB())) {
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001106 Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001107 }
Chris Lattner8b4be372008-12-16 07:10:09 +00001108 }
Chris Lattner5ed409e2008-12-08 07:31:50 +00001109 ++NumCacheCompleteNonLocalPtr;
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001110 return false;
Chris Lattner5ed409e2008-12-08 07:31:50 +00001111 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001112
Chris Lattner5ed409e2008-12-08 07:31:50 +00001113 // Otherwise, this is either a new block, a block with an invalid cache
1114 // pointer, or one that we're about to invalidate by putting more info into
1115 // it than its valid cache info holds. If the cache is empty, the result will
1116 // be valid cache info; otherwise it won't be.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001117 if (Cache->empty())
Dan Gohman23483932010-09-22 21:41:02 +00001118 CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
Dan Gohmanc87c8432010-11-11 00:42:22 +00001119 else
Dan Gohman23483932010-09-22 21:41:02 +00001120 CacheInfo->Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001121
Chris Lattner5ed409e2008-12-08 07:31:50 +00001122 SmallVector<BasicBlock*, 32> Worklist;
1123 Worklist.push_back(StartBB);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001124
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001125 // PredList used inside loop.
1126 SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;
1127
Chris Lattnera28355d2008-12-07 08:50:20 +00001128 // Keep track of the entries that we know are sorted. Previously cached
1129 // entries will all be sorted. The entries we add are only sorted on demand
1130 // (we don't insert every element into its sorted position). We know that we
1131 // won't get any reuse from currently inserted values, because we don't
1132 // revisit blocks after we insert info for them.
1133 unsigned NumSortedEntries = Cache->size();
Chris Lattnerf09619d2009-01-22 07:04:01 +00001134 DEBUG(AssertSorted(*Cache));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001135
Chris Lattner2faa2c72008-12-07 02:15:47 +00001136 while (!Worklist.empty()) {
Chris Lattner7564a3b2008-12-07 02:56:57 +00001137 BasicBlock *BB = Worklist.pop_back_val();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001138
Bruno Cardoso Lopese3c513a2014-10-01 20:07:13 +00001139 // If we end up processing a large number of blocks, the query becomes very
1140 // expensive and the result is likely not worth the effort, so give up.
1141 if (Result.size() > NumResultsLimit) {
1142 Worklist.clear();
1143 // Sort it now (if needed) so that recursive invocations of
1144 // getNonLocalPointerDepFromBB and other routines that could reuse the
1145 // cache value will only see properly sorted cache arrays.
1146 if (Cache && NumSortedEntries != Cache->size()) {
1147 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1148 NumSortedEntries = Cache->size();
1149 }
1150 // Since we bail out, the "Cache" set won't contain all of the
1151 // results for the query. This is ok (we can still use it to accelerate
1152 // specific block queries) but we can't do the fastpath "return all
1153 // results from the set". Clear out the indicator for this.
1154 CacheInfo->Pair = BBSkipFirstBlockPair();
1155 return true;
1156 }
1157
Chris Lattner75510d82008-12-09 07:52:59 +00001158 // Skip the first block if we have it.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001159 if (!SkipFirstBlock) {
Chris Lattner75510d82008-12-09 07:52:59 +00001160 // Analyze the dependency of *Pointer in BB. See if we have already
1161 // been here.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001162 assert(Visited.count(BB) && "Should check 'visited' before adding to WL");
Chris Lattnera28355d2008-12-07 08:50:20 +00001163
Chris Lattner75510d82008-12-09 07:52:59 +00001164 // Get the dependency info for Pointer in BB. If we have cached
1165 // information, we will use it, otherwise we compute it.
Chris Lattnerf09619d2009-01-22 07:04:01 +00001166 DEBUG(AssertSorted(*Cache, NumSortedEntries));
Dan Gohman23483932010-09-22 21:41:02 +00001167 MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
Chris Lattner972e6d82009-12-09 01:59:31 +00001168 NumSortedEntries);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001169
Chris Lattner75510d82008-12-09 07:52:59 +00001170 // If we got a Def or Clobber, add this to the list of results.
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001171 if (!Dep.isNonLocal()) {
1172 if (!DT) {
1173 Result.push_back(NonLocalDepResult(BB,
1174 MemDepResult::getUnknown(),
1175 Pointer.getAddr()));
1176 continue;
1177 } else if (DT->isReachableFromEntry(BB)) {
1178 Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
1179 continue;
1180 }
Chris Lattner75510d82008-12-09 07:52:59 +00001181 }
Chris Lattner2faa2c72008-12-07 02:15:47 +00001182 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001183
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001184 // If 'Pointer' is an instruction defined in this block, then we need to do
1185 // phi translation to change it into a value live in the predecessor block.
Chris Lattner972e6d82009-12-09 01:59:31 +00001186 // If not, we just add the predecessors to the worklist and scan them with
1187 // the same Pointer.
1188 if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001189 SkipFirstBlock = false;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001190 SmallVector<BasicBlock*, 16> NewBlocks;
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001191 for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
1192 // Verify that we haven't looked at this block yet.
1193 std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
Chris Lattner972e6d82009-12-09 01:59:31 +00001194 InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001195 if (InsertRes.second) {
1196 // First time we've looked at *PI.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001197 NewBlocks.push_back(*PI);
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001198 continue;
1199 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001200
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001201 // If we have seen this block before, but it was with a different
1202 // pointer then we have a phi translation failure and we have to treat
1203 // this as a clobber.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001204 if (InsertRes.first->second != Pointer.getAddr()) {
1205 // Make sure to clean up the Visited map before continuing on to
1206 // PredTranslationFailure.
1207 for (unsigned i = 0; i < NewBlocks.size(); i++)
1208 Visited.erase(NewBlocks[i]);
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001209 goto PredTranslationFailure;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001210 }
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001211 }
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001212 Worklist.append(NewBlocks.begin(), NewBlocks.end());
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001213 continue;
1214 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001215
Chris Lattner972e6d82009-12-09 01:59:31 +00001216 // We do need to do phi translation; if we know ahead of time we can't phi
1217 // translate this value, don't even try.
1218 if (!Pointer.IsPotentiallyPHITranslatable())
1219 goto PredTranslationFailure;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001220
Chris Lattner2f0c1c42009-07-13 17:14:23 +00001221 // We may have added values to the cache list before this PHI translation.
1222 // If so, we haven't done anything to ensure that the cache remains sorted.
1223 // Sort it now (if needed) so that recursive invocations of
1224 // getNonLocalPointerDepFromBB and other routines that could reuse the cache
1225 // value will only see properly sorted cache arrays.
1226 if (Cache && NumSortedEntries != Cache->size()) {
Chris Lattner370aada2009-07-13 17:20:05 +00001227 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
Chris Lattner2f0c1c42009-07-13 17:14:23 +00001228 NumSortedEntries = Cache->size();
1229 }
Craig Topper9f008862014-04-15 04:59:12 +00001230 Cache = nullptr;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001231
1232 PredList.clear();
Chris Lattnerac323292009-11-27 08:37:22 +00001233 for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
1234 BasicBlock *Pred = *PI;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001235 PredList.push_back(std::make_pair(Pred, Pointer));
1236
Chris Lattner972e6d82009-12-09 01:59:31 +00001237 // Get the PHI translated pointer in this predecessor. This can fail if
1238 // not translatable, in which case getAddr() returns null.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001239 PHITransAddr &PredPointer = PredList.back().second;
Craig Topper9f008862014-04-15 04:59:12 +00001240 PredPointer.PHITranslateValue(BB, Pred, nullptr);
Chris Lattner972e6d82009-12-09 01:59:31 +00001241
1242 Value *PredPtrVal = PredPointer.getAddr();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001243
Chris Lattnerac323292009-11-27 08:37:22 +00001244 // Check to see if we have already visited this pred block with another
1245 // pointer. If so, we can't do this lookup. This failure can occur
1246 // with PHI translation when a critical edge exists and the PHI node in
1247 // the successor translates to a pointer value different than the
1248 // pointer the block was first analyzed with.
1249 std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
Chris Lattner972e6d82009-12-09 01:59:31 +00001250 InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001251
Chris Lattnerac323292009-11-27 08:37:22 +00001252 if (!InsertRes.second) {
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001253 // We found the pred; take it off the list of preds to visit.
1254 PredList.pop_back();
1255
Chris Lattnerac323292009-11-27 08:37:22 +00001256 // If the predecessor was visited with PredPtr, then we already did
1257 // the analysis and can ignore it.
Chris Lattner972e6d82009-12-09 01:59:31 +00001258 if (InsertRes.first->second == PredPtrVal)
Chris Lattnerac323292009-11-27 08:37:22 +00001259 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001260
Chris Lattnerac323292009-11-27 08:37:22 +00001261 // Otherwise, the block was previously analyzed with a different
1262 // pointer. We can't represent the result of this case, so we just
1263 // treat this as a phi translation failure.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001264
1265 // Make sure to clean up the Visited map before continuing on to
1266 // PredTranslationFailure.
Matt Arsenault2080ecd2013-03-29 18:48:42 +00001267 for (unsigned i = 0, n = PredList.size(); i < n; ++i)
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001268 Visited.erase(PredList[i].first);
1269
Chris Lattnerac323292009-11-27 08:37:22 +00001270 goto PredTranslationFailure;
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001271 }
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001272 }
1273
1274 // Actually process results here; this need to be a separate loop to avoid
1275 // calling getNonLocalPointerDepFromBB for blocks we don't want to return
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001276 // any results for. (getNonLocalPointerDepFromBB will modify our
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001277 // datastructures in ways the code after the PredTranslationFailure label
1278 // doesn't expect.)
Matt Arsenault2080ecd2013-03-29 18:48:42 +00001279 for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001280 BasicBlock *Pred = PredList[i].first;
1281 PHITransAddr &PredPointer = PredList[i].second;
1282 Value *PredPtrVal = PredPointer.getAddr();
1283
1284 bool CanTranslate = true;
Chris Lattner2be52e72009-11-27 22:05:15 +00001285 // If PHI translation was unable to find an available pointer in this
1286 // predecessor, then we have to assume that the pointer is clobbered in
1287 // that predecessor. We can still do PRE of the load, which would insert
1288 // a computation of the pointer in this predecessor.
Craig Topper9f008862014-04-15 04:59:12 +00001289 if (!PredPtrVal)
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001290 CanTranslate = false;
1291
1292 // FIXME: it is entirely possible that PHI translating will end up with
1293 // the same value. Consider PHI translating something like:
1294 // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
1295 // to recurse here, pedantically speaking.
1296
1297 // If getNonLocalPointerDepFromBB fails here, that means the cached
1298 // result conflicted with the Visited list; we have to conservatively
Eli Friedman7d58bc72011-06-15 00:47:34 +00001299 // assume it is unknown, but this also does not block PRE of the load.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001300 if (!CanTranslate ||
1301 getNonLocalPointerDepFromBB(PredPointer,
1302 Loc.getWithNewPtr(PredPtrVal),
1303 isLoad, Pred,
1304 Result, Visited)) {
Chris Lattner9c2053b2009-12-01 07:33:32 +00001305 // Add the entry to the Result list.
Eli Friedman7d58bc72011-06-15 00:47:34 +00001306 NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
Chris Lattner9c2053b2009-12-01 07:33:32 +00001307 Result.push_back(Entry);
1308
Chris Lattner25bf6f82009-12-19 21:29:22 +00001309 // Since we had a phi translation failure, the cache for CacheKey won't
1310 // include all of the entries that we need to immediately satisfy future
1311 // queries. Mark this in NonLocalPointerDeps by setting the
1312 // BBSkipFirstBlockPair pointer to null. This means any future reuse of the
1313 // cached value has to do more work, but it won't miss the phi trans failure.
Dan Gohman23483932010-09-22 21:41:02 +00001314 NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
1315 NLPI.Pair = BBSkipFirstBlockPair();
Chris Lattner2be52e72009-11-27 22:05:15 +00001316 continue;
Chris Lattner2be52e72009-11-27 22:05:15 +00001317 }
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001318 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001319
Chris Lattnerac323292009-11-27 08:37:22 +00001320 // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
1321 CacheInfo = &NonLocalPointerDeps[CacheKey];
Dan Gohman23483932010-09-22 21:41:02 +00001322 Cache = &CacheInfo->NonLocalDeps;
Chris Lattnerac323292009-11-27 08:37:22 +00001323 NumSortedEntries = Cache->size();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001324
Chris Lattnerac323292009-11-27 08:37:22 +00001325 // Since we did phi translation, the "Cache" set won't contain all of the
1326 // results for the query. This is ok (we can still use it to accelerate
1327 // specific block queries) but we can't do the fastpath "return all
1328 // results from the set". Clear out the indicator for this.
Dan Gohman23483932010-09-22 21:41:02 +00001329 CacheInfo->Pair = BBSkipFirstBlockPair();
Chris Lattnerac323292009-11-27 08:37:22 +00001330 SkipFirstBlock = false;
1331 continue;
Chris Lattnerc49f5ac2009-11-26 23:18:49 +00001332
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001333 PredTranslationFailure:
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001334 // The following code is "failure"; we can't produce a sane translation
1335 // for the given block. It assumes that we haven't modified any of
1336 // our data structures while processing the current block.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001337
Craig Topper9f008862014-04-15 04:59:12 +00001338 if (!Cache) {
Chris Lattner3f4591c2009-01-23 07:12:16 +00001339 // Refresh the CacheInfo/Cache pointer if it got invalidated.
1340 CacheInfo = &NonLocalPointerDeps[CacheKey];
Dan Gohman23483932010-09-22 21:41:02 +00001341 Cache = &CacheInfo->NonLocalDeps;
Chris Lattner3f4591c2009-01-23 07:12:16 +00001342 NumSortedEntries = Cache->size();
Chris Lattner3f4591c2009-01-23 07:12:16 +00001343 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001344
Chris Lattner25bf6f82009-12-19 21:29:22 +00001345 // Since we failed phi translation, the "Cache" set won't contain all of the
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001346 // results for the query. This is ok (we can still use it to accelerate
1347 // specific block queries) but we can't do the fastpath "return all
Chris Lattner25bf6f82009-12-19 21:29:22 +00001348 // results from the set". Clear out the indicator for this.
Dan Gohman23483932010-09-22 21:41:02 +00001349 CacheInfo->Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001350
Eli Friedman7d58bc72011-06-15 00:47:34 +00001351 // If *nothing* works, mark the pointer as unknown.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001352 //
1353 // If this is the magic first block, return this as a clobber of the whole
1354 // incoming value. Since we can't phi translate to one of the predecessors,
1355 // we have to bail out.
1356 if (SkipFirstBlock)
1357 return true;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001358
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001359 for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
1360 assert(I != Cache->rend() && "Didn't find current block??");
Chris Lattner0c315472009-12-09 07:08:01 +00001361 if (I->getBB() != BB)
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001362 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001363
Chris Lattner0c315472009-12-09 07:08:01 +00001364 assert(I->getResult().isNonLocal() &&
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001365 "Should only be here with transparent block");
Eli Friedman7d58bc72011-06-15 00:47:34 +00001366 I->setResult(MemDepResult::getUnknown());
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001367 Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
1368 Pointer.getAddr()));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001369 break;
Chris Lattner7564a3b2008-12-07 02:56:57 +00001370 }
Chris Lattner2faa2c72008-12-07 02:15:47 +00001371 }
Chris Lattner3f4591c2009-01-23 07:12:16 +00001372
Chris Lattnerf903fe12008-12-09 07:47:11 +00001373 // Okay, we're done now. If we added new values to the cache, re-sort it.
Chris Lattner370aada2009-07-13 17:20:05 +00001374 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
Chris Lattnerf09619d2009-01-22 07:04:01 +00001375 DEBUG(AssertSorted(*Cache));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001376 return false;
Chris Lattnera28355d2008-12-07 08:50:20 +00001377}
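
// Illustrative sketch of the caller-side contract (mirrors the public entry
// point getNonLocalPointerDependency above): a 'true' return means dependence
// information could not be computed, so the caller falls back to a single
// conservative "unknown" entry for the query block.
//
//   DenseMap<BasicBlock*, Value*> Visited;
//   if (getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
//                                   Result, Visited, /*SkipFirstBlock=*/true)) {
//     Result.clear();
//     Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
//                                        const_cast<Value *>(Loc.Ptr)));
//   }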
1378
1379/// RemoveCachedNonLocalPointerDependencies - If P exists in
1380/// CachedNonLocalPointerInfo, remove it.
1381void MemoryDependenceAnalysis::
1382RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001383 CachedNonLocalPointerInfo::iterator It =
Chris Lattnera28355d2008-12-07 08:50:20 +00001384 NonLocalPointerDeps.find(P);
1385 if (It == NonLocalPointerDeps.end()) return;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001386
Chris Lattnera28355d2008-12-07 08:50:20 +00001387 // Remove all of the entries in the BB->val map. This involves removing
1388 // instructions from the reverse map.
Dan Gohman23483932010-09-22 21:41:02 +00001389 NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001390
Chris Lattnera28355d2008-12-07 08:50:20 +00001391 for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
Chris Lattner0c315472009-12-09 07:08:01 +00001392 Instruction *Target = PInfo[i].getResult().getInst();
Craig Topper9f008862014-04-15 04:59:12 +00001393 if (!Target) continue; // Ignore non-local dep results.
Chris Lattner0c315472009-12-09 07:08:01 +00001394 assert(Target->getParent() == PInfo[i].getBB());
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001395
Chris Lattnera28355d2008-12-07 08:50:20 +00001396 // Eliminating the dirty entry from 'Cache', so update the reverse info.
Chris Lattner8eda11b2009-03-29 00:24:04 +00001397 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
Chris Lattnera28355d2008-12-07 08:50:20 +00001398 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001399
Chris Lattnera28355d2008-12-07 08:50:20 +00001400 // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
1401 NonLocalPointerDeps.erase(It);
Chris Lattner2faa2c72008-12-07 02:15:47 +00001402}
1403
1404
Chris Lattnerfa9f99a2008-12-09 22:06:23 +00001405/// invalidateCachedPointerInfo - This method is used to invalidate cached
1406/// information about the specified pointer, because it may be too
1407/// conservative in memdep. This is an optional call that can be used when
1408/// the client detects an equivalence between the pointer and some other
1409 /// value and replaces the other value with Ptr. This can make Ptr available
1410 /// in more places than the cached info necessarily reflects.
1411void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
1412 // If Ptr isn't really a pointer, just ignore it.
Duncan Sands19d0b472010-02-16 11:11:14 +00001413 if (!Ptr->getType()->isPointerTy()) return;
Chris Lattnerfa9f99a2008-12-09 22:06:23 +00001414 // Flush store info for the pointer.
1415 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
1416 // Flush load info for the pointer.
1417 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
1418}
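
// Example (illustrative sketch only): a client that proves two pointers
// equivalent and rewrites one into the other should tell memdep, so that
// overly conservative cached info for the surviving pointer is flushed.
// 'MD', 'OldPtr' and 'NewPtr' are assumed client-side values.
//
//   OldPtr->replaceAllUsesWith(NewPtr);      // client performs the rewrite
//   MD->invalidateCachedPointerInfo(NewPtr); // drop stale cached deps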
1419
Bob Wilson92cdb6e2010-02-16 19:51:59 +00001420/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
1421/// This needs to be done when the CFG changes, e.g., due to splitting
1422/// critical edges.
1423void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
1424 PredCache->clear();
1425}
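
// Example (illustrative sketch only): any transform that changes the CFG,
// e.g. by splitting a critical edge, must flush the predecessor cache before
// issuing further non-local queries. 'MD', 'Pred' and 'SuccNum' are assumed
// client-side values; SplitCriticalEdge is the usual CFG utility.
//
//   if (SplitCriticalEdge(Pred->getTerminator(), SuccNum))
//     MD->invalidateCachedPredecessors();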
1426
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001427/// removeInstruction - Remove an instruction from the dependence analysis,
1428/// updating the dependence of instructions that previously depended on it.
Owen Anderson2b21c3c2007-08-08 22:26:03 +00001429/// This method attempts to keep the cache coherent using the reverse map.
Chris Lattnera25d39522008-11-28 22:04:47 +00001430void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
Chris Lattnera25d39522008-11-28 22:04:47 +00001431 // Walk through the Non-local dependencies, removing this one as the value
1432 // for any cached queries.
Chris Lattner1b810bd2008-11-30 02:28:25 +00001433 NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
1434 if (NLDI != NonLocalDeps.end()) {
Chris Lattner7e61daf2008-12-01 01:15:42 +00001435 NonLocalDepInfo &BlockMap = NLDI->second.first;
Chris Lattnerfc678e22008-11-30 02:30:50 +00001436 for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
1437 DI != DE; ++DI)
Chris Lattner0c315472009-12-09 07:08:01 +00001438 if (Instruction *Inst = DI->getResult().getInst())
Chris Lattnerde4440c2008-12-07 18:39:13 +00001439 RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
Chris Lattner1b810bd2008-11-30 02:28:25 +00001440 NonLocalDeps.erase(NLDI);
1441 }
Owen Anderson086b2c42007-12-08 01:37:09 +00001442
Chris Lattnera25d39522008-11-28 22:04:47 +00001443 // If we have a cached local dependence query for this instruction, remove it.
Chris Lattner73c25452008-11-28 22:28:27 +00001444 //
Chris Lattnerde04e112008-11-29 01:43:36 +00001445 LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
1446 if (LocalDepEntry != LocalDeps.end()) {
Chris Lattnerada1f872008-11-30 01:09:30 +00001447 // Remove us from DepInst's reverse set now that the local dep info is gone.
Chris Lattnerde4440c2008-12-07 18:39:13 +00001448 if (Instruction *Inst = LocalDepEntry->second.getInst())
1449 RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
Chris Lattnerada1f872008-11-30 01:09:30 +00001450
Chris Lattner73c25452008-11-28 22:28:27 +00001451 // Remove this local dependency info.
Chris Lattnerde04e112008-11-29 01:43:36 +00001452 LocalDeps.erase(LocalDepEntry);
Chris Lattnera28355d2008-12-07 08:50:20 +00001453 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001454
Chris Lattnera28355d2008-12-07 08:50:20 +00001455 // If we have any cached pointer dependencies on this instruction, remove
1456 // them. If the instruction has non-pointer type, then it can't be a pointer
1457 // base.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001458
Chris Lattnera28355d2008-12-07 08:50:20 +00001459 // Remove it from both the load info and the store info. The instruction
1460 // can't be in either of these maps if it is non-pointer.
Duncan Sands19d0b472010-02-16 11:11:14 +00001461 if (RemInst->getType()->isPointerTy()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001462 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
1463 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
1464 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001465
Chris Lattnerd3d91112008-11-28 22:51:08 +00001466 // Loop over all of the things that depend on the instruction we're removing.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001467 //
Chris Lattner63bd5862008-11-29 23:30:39 +00001468 SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;
Chris Lattner82b70342008-12-07 18:42:51 +00001469
1470 // If we find RemInst as a clobber or Def in any of the maps for other values,
1471 // we need to replace its entry with a dirty version of the instruction after
1472 // it. If RemInst is a terminator, we use a null dirty value.
1473 //
1474 // Using a dirty version of the instruction after RemInst saves having to scan
1475 // the entire block to get to this point.
1476 MemDepResult NewDirtyVal;
1477 if (!RemInst->isTerminator())
1478 NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001479
Chris Lattner9f1988ab2008-11-29 09:20:15 +00001480 ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
1481 if (ReverseDepIt != ReverseLocalDeps.end()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001482 // RemInst can't be the terminator if it has local stuff depending on it.
Craig Topper46276792014-08-24 23:23:06 +00001483 assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
Chris Lattnerada1f872008-11-30 01:09:30 +00001484 "Nothing can locally depend on a terminator");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001485
Craig Topper46276792014-08-24 23:23:06 +00001486 for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
Chris Lattner1b810bd2008-11-30 02:28:25 +00001487 assert(InstDependingOnRemInst != RemInst &&
1488 "Already removed our local dep info");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001489
Chris Lattner82b70342008-12-07 18:42:51 +00001490 LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001491
Chris Lattnerada1f872008-11-30 01:09:30 +00001492 // Make sure to remember that new things depend on NewDirtyVal's instruction.
Chris Lattner82b70342008-12-07 18:42:51 +00001493 assert(NewDirtyVal.getInst() && "There is no way something else can have "
1494 "a local dep on this if it is a terminator!");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001495 ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
Chris Lattnerada1f872008-11-30 01:09:30 +00001496 InstDependingOnRemInst));
Chris Lattnerd3d91112008-11-28 22:51:08 +00001497 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001498
Chris Lattner63bd5862008-11-29 23:30:39 +00001499 ReverseLocalDeps.erase(ReverseDepIt);
1500
1501 // Add new reverse deps after scanning the set, to avoid invalidating the
1502 // 'ReverseDeps' reference.
1503 while (!ReverseDepsToAdd.empty()) {
1504 ReverseLocalDeps[ReverseDepsToAdd.back().first]
1505 .insert(ReverseDepsToAdd.back().second);
1506 ReverseDepsToAdd.pop_back();
1507 }
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001508 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001509
Chris Lattner9f1988ab2008-11-29 09:20:15 +00001510 ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
1511 if (ReverseDepIt != ReverseNonLocalDeps.end()) {
Craig Topper46276792014-08-24 23:23:06 +00001512 for (Instruction *I : ReverseDepIt->second) {
1513 assert(I != RemInst && "Already removed NonLocalDep info for RemInst");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001514
Craig Topper46276792014-08-24 23:23:06 +00001515 PerInstNLInfo &INLD = NonLocalDeps[I];
Chris Lattner44104272008-11-30 02:52:26 +00001516 // The information is now dirty!
Chris Lattner7e61daf2008-12-01 01:15:42 +00001517 INLD.second = true;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001518
1519 for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
Chris Lattner7e61daf2008-12-01 01:15:42 +00001520 DE = INLD.first.end(); DI != DE; ++DI) {
Chris Lattner0c315472009-12-09 07:08:01 +00001521 if (DI->getResult().getInst() != RemInst) continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001522
Chris Lattner1b810bd2008-11-30 02:28:25 +00001523 // Convert to a dirty entry for the subsequent instruction.
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001524 DI->setResult(NewDirtyVal);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001525
Chris Lattner82b70342008-12-07 18:42:51 +00001526 if (Instruction *NextI = NewDirtyVal.getInst())
Craig Topper46276792014-08-24 23:23:06 +00001527 ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
Chris Lattner1b810bd2008-11-30 02:28:25 +00001528 }
1529 }
Chris Lattner63bd5862008-11-29 23:30:39 +00001530
1531 ReverseNonLocalDeps.erase(ReverseDepIt);
1532
Chris Lattnere7d7e132008-11-29 22:02:15 +00001533 // Add new reverse deps after scanning the set, to avoid invalidating it.
1534 while (!ReverseDepsToAdd.empty()) {
1535 ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
1536 .insert(ReverseDepsToAdd.back().second);
1537 ReverseDepsToAdd.pop_back();
1538 }
Owen Anderson5f208be2007-08-16 21:27:05 +00001539 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001540
Chris Lattnera28355d2008-12-07 08:50:20 +00001541 // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
1542 // value in the NonLocalPointerDeps info.
1543 ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
1544 ReverseNonLocalPtrDeps.find(RemInst);
1545 if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001546 SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001547
Craig Topper46276792014-08-24 23:23:06 +00001548 for (ValueIsLoadPair P : ReversePtrDepIt->second) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001549 assert(P.getPointer() != RemInst &&
1550 "Already removed NonLocalPointerDeps info for RemInst");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001551
Dan Gohman23483932010-09-22 21:41:02 +00001552 NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001553
Chris Lattner5ed409e2008-12-08 07:31:50 +00001554 // The cache is not valid for any specific block anymore.
Dan Gohman23483932010-09-22 21:41:02 +00001555 NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001556
Chris Lattnera28355d2008-12-07 08:50:20 +00001557 // Update any entries for RemInst to use the instruction after it.
1558 for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
1559 DI != DE; ++DI) {
Chris Lattner0c315472009-12-09 07:08:01 +00001560 if (DI->getResult().getInst() != RemInst) continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001561
Chris Lattnera28355d2008-12-07 08:50:20 +00001562 // Convert to a dirty entry for the subsequent instruction.
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001563 DI->setResult(NewDirtyVal);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001564
Chris Lattnera28355d2008-12-07 08:50:20 +00001565 if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
1566 ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
1567 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001568
Chris Lattner3f4591c2009-01-23 07:12:16 +00001569 // Re-sort the NonLocalDepInfo. Changing the dirty entry to its
1570 // subsequent value may invalidate the sortedness.
1571 std::sort(NLPDI.begin(), NLPDI.end());
Chris Lattnera28355d2008-12-07 08:50:20 +00001572 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001573
Chris Lattnera28355d2008-12-07 08:50:20 +00001574 ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001575
Chris Lattnera28355d2008-12-07 08:50:20 +00001576 while (!ReversePtrDepsToAdd.empty()) {
1577 ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
Chris Lattner8eda11b2009-03-29 00:24:04 +00001578 .insert(ReversePtrDepsToAdd.back().second);
Chris Lattnera28355d2008-12-07 08:50:20 +00001579 ReversePtrDepsToAdd.pop_back();
1580 }
1581 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001582
1583
Chris Lattner1b810bd2008-11-30 02:28:25 +00001584 assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
Chris Lattner13cae612008-11-30 19:24:31 +00001585 AA->deleteValue(RemInst);
Jakob Stoklund Olesen087f2072011-01-11 04:05:39 +00001586 DEBUG(verifyRemoved(RemInst));
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001587}
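
// Example (illustrative sketch only): a transform that deletes an
// instruction must detach it from memdep's caches before erasing it.
// 'MD' and 'DeadInst' are assumed client-side values.
//
//   MD->removeInstruction(DeadInst);  // fix forward and reverse maps first
//   DeadInst->eraseFromParent();
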
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001588/// verifyRemoved - Verify that the specified instruction does not occur
Craig Topper46276792014-08-24 23:23:06 +00001589/// in our internal data structures. This function verifies by asserting in
1590/// debug builds.
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001591void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
Craig Topper46276792014-08-24 23:23:06 +00001592#ifndef NDEBUG
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001593 for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
1594 E = LocalDeps.end(); I != E; ++I) {
1595 assert(I->first != D && "Inst occurs in data structures");
Chris Lattner47e81d02008-11-30 23:17:19 +00001596 assert(I->second.getInst() != D &&
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001597 "Inst occurs in data structures");
1598 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001599
Chris Lattnera28355d2008-12-07 08:50:20 +00001600 for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
1601 E = NonLocalPointerDeps.end(); I != E; ++I) {
1602 assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
Dan Gohman23483932010-09-22 21:41:02 +00001603 const NonLocalDepInfo &Val = I->second.NonLocalDeps;
Chris Lattnera28355d2008-12-07 08:50:20 +00001604 for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
1605 II != E; ++II)
Chris Lattner0c315472009-12-09 07:08:01 +00001606 assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
Chris Lattnera28355d2008-12-07 08:50:20 +00001607 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001608
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001609 for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
1610 E = NonLocalDeps.end(); I != E; ++I) {
1611 assert(I->first != D && "Inst occurs in data structures");
Chris Lattner44104272008-11-30 02:52:26 +00001612 const PerInstNLInfo &INLD = I->second;
Chris Lattner7e61daf2008-12-01 01:15:42 +00001613 for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
1614 EE = INLD.first.end(); II != EE; ++II)
Chris Lattner0c315472009-12-09 07:08:01 +00001615 assert(II->getResult().getInst() != D && "Inst occurs in data structures");
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001616 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001617
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001618 for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
Chris Lattner1b810bd2008-11-30 02:28:25 +00001619 E = ReverseLocalDeps.end(); I != E; ++I) {
1620 assert(I->first != D && "Inst occurs in data structures");
Craig Topper46276792014-08-24 23:23:06 +00001621 for (Instruction *Inst : I->second)
1622 assert(Inst != D && "Inst occurs in data structures");
Chris Lattner1b810bd2008-11-30 02:28:25 +00001623 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001624
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001625 for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
1626 E = ReverseNonLocalDeps.end();
Chris Lattner1b810bd2008-11-30 02:28:25 +00001627 I != E; ++I) {
1628 assert(I->first != D && "Inst occurs in data structures");
Craig Topper46276792014-08-24 23:23:06 +00001629 for (Instruction *Inst : I->second)
1630 assert(Inst != D && "Inst occurs in data structures");
Chris Lattner1b810bd2008-11-30 02:28:25 +00001631 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001632
Chris Lattnera28355d2008-12-07 08:50:20 +00001633 for (ReverseNonLocalPtrDepTy::const_iterator
1634 I = ReverseNonLocalPtrDeps.begin(),
1635 E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
1636 assert(I->first != D && "Inst occurs in rev NLPD map");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001637
Craig Topper46276792014-08-24 23:23:06 +00001638 for (ValueIsLoadPair P : I->second)
1639 assert(P != ValueIsLoadPair(D, false) &&
1640 P != ValueIsLoadPair(D, true) &&
Chris Lattnera28355d2008-12-07 08:50:20 +00001641 "Inst occurs in ReverseNonLocalPtrDeps map");
1642 }
Craig Topper46276792014-08-24 23:23:06 +00001643#endif
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001644}