//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
          "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static const unsigned int BlockScanLimit = 100;

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
                "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
                "Memory Dependence Analysis", false, true)

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
    : FunctionPass(ID), PredCache() {
  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}

/// Clean up memory in between runs
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache->clear();
}

/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequiredTransitive<AliasAnalysis>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &F) {
  AA = &getAnalysis<AliasAnalysis>();
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  if (!PredCache)
    PredCache.reset(new PredIteratorCache());
  return false;
}

/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
    InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!"); (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// GetLocation - If the given instruction references a specific memory
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
static
AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
                                        AliasAnalysis::Location &Loc,
                                        AliasAnalysis *AA) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::Ref;
    }
    if (LI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::Mod;
    }
    if (SI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = AA->getLocation(V);
    return AliasAnalysis::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
    // calls to free() deallocate the entire structure
    Loc = AliasAnalysis::Location(CI->getArgOperand(0));
    return AliasAnalysis::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    AAMDNodes AAInfo;

    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      II->getAAMetadata(AAInfo);
      Loc = AliasAnalysis::Location(II->getArgOperand(1),
                                    cast<ConstantInt>(II->getArgOperand(0))
                                        ->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    case Intrinsic::invariant_end:
      II->getAAMetadata(AAInfo);
      Loc = AliasAnalysis::Location(II->getArgOperand(2),
                                    cast<ConstantInt>(II->getArgOperand(1))
                                        ->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return AliasAnalysis::ModRef;
  if (Inst->mayReadFromMemory())
    return AliasAnalysis::Ref;
  return AliasAnalysis::NoModRef;
}

/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed
    AliasAnalysis::Location Loc;
    AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto InstCS = CallSite(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst)) continue;
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case AliasAnalysis::NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        continue;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (MR != AliasAnalysis::NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

/// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that
/// would fully overlap MemLoc if done as a wider legal integer load.
///
/// MemLocBase, MemLocOffset are lazily computed here the first time the
/// base/offs of memloc is needed.
static bool isLoadLoadClobberIfExtendedToFullWidth(
    const AliasAnalysis::Location &MemLoc, const Value *&MemLocBase,
    int64_t &MemLocOffs, const LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (!MemLocBase)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);

  unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
      MemLocBase, MemLocOffs, MemLoc.Size, LI);
  return Size != 0;
}

/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
/// looks at a memory location for a load (specified by MemLocBase, Offs,
/// and Size) and compares it against a load.  If the specified load could
/// be safely widened to a larger integer load that is 1) still efficient,
/// 2) safe for the target, and 3) would provide the specified memory
/// location value, then this function returns the size in bytes of the
/// load width to use.  If not, this returns zero.
unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
    const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
    const LoadInst *LI) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
    return 0;

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
      GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase) return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs) return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs+MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs+LoadAlign < MemLocEnd) return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (1) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !DL.fitsInLegalInteger(NewLoadByteSize*8))
      return 0;

    if (LIOffs + NewLoadByteSize > MemLocEnd &&
        LI->getParent()->getParent()->hasFnAttribute(
            Attribute::SanitizeAddress))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings. So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs+NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}

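/// isVolatile - Return true if the given instruction is a volatile memory
/// access (a volatile load, store, or cmpxchg).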
static bool isVolatile(Instruction *Inst) {
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return LI->isVolatile();
  else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return SI->isVolatile();
  else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return AI->isVolatile();
  return false;
}


/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases with
/// read-only operations.  If isLoad is false, this routine ignores may-aliases
/// with reads from read-only locations.  If possible, pass the query
/// instruction as well; this function may take advantage of the metadata
/// annotated to the query instruction to refine the result.
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
                         BasicBlock::iterator ScanIt, BasicBlock *BB,
                         Instruction *QueryInst) {

  const Value *MemLocBase = nullptr;
  int64_t MemLocOffset = 0;
  unsigned Limit = BlockScanLimit;
  bool isInvariantLoad = false;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimisation of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write.  We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
      isInvariantLoad = true;
  }

  const DataLayout &DL = BB->getModule()->getDataLayout();

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II)) continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
                            MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means that
    // a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that it
    // does not alias with when this atomic load indicates that another thread may
    // be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {

      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations; normal accesses, for example, can be
      // safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile
          return MemDepResult::getClobber(LI);
        if (isVolatile(QueryInst))
          // Ordering required if QueryInst is itself volatile
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && LI->getOrdering() > Unordered) {
        if (!QueryInst)
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != Monotonic)
          return MemDepResult::getClobber(LI);
        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
          if (!QueryLI->isSimple())
            return MemDepResult::getClobber(LI);
        } else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
          if (!QuerySI->isSimple())
            return MemDepResult::getClobber(LI);
        } else if (QueryInst->mayReadOrWriteMemory()) {
          return MemDepResult::getClobber(LI);
        }
      }

      AliasAnalysis::Location LoadLoc = AA->getLocation(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasAnalysis::NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1).  If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
            if (LI->getAlignment() * 8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI))
              return MemDepResult::getClobber(Inst);
          }
          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == AliasAnalysis::MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasAnalysis::PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Random may-alias loads don't depend on each other without a
        // dependence.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == AliasAnalysis::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA->pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered()) {
        if (!QueryInst)
          return MemDepResult::getClobber(SI);
        if (SI->getOrdering() != Monotonic)
          return MemDepResult::getClobber(SI);
        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
          if (!QueryLI->isSimple())
            return MemDepResult::getClobber(SI);
        } else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
          if (!QuerySI->isSimple())
            return MemDepResult::getClobber(SI);
        } else if (QueryInst->mayReadOrWriteMemory()) {
          return MemDepResult::getClobber(SI);
        }
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations; normal accesses, for example, can be
      // reordered with volatile accesses.
      if (SI->isVolatile())
        return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      AliasAnalysis::Location StoreLoc = AA->getLocation(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);

      if (R == AliasAnalysis::NoAlias)
        continue;
      if (R == AliasAnalysis::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call, not
    // a subsequent bitcast of the malloc call result.  There can be stores to
    // the malloced memory between the malloc call and its bitcast uses, and we
    // need to continue scanning until the malloc call.
    const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);

      if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      // Be conservative if the accessed pointer may alias the allocation.
      if (AA->alias(Inst, AccessPtr) != AliasAnalysis::NoAlias)
        return MemDepResult::getClobber(Inst);
      // If the allocation is not aliased and does not read memory (like
      // strdup), it is safe to ignore.
      if (isa<AllocaInst>(Inst) ||
          isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
        continue;
    }

    if (isInvariantLoad)
      continue;

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (MR == AliasAnalysis::ModRef)
      MR = AA->callCapturesBefore(Inst, MemLoc, DT);
    switch (MR) {
    case AliasAnalysis::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case AliasAnalysis::Mod:
      return MemDepResult::getClobber(Inst);
    case AliasAnalysis::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    AliasAnalysis::Location MemLoc;
    AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & AliasAnalysis::Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
                                            QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
                                             QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}

#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1) Count = Cache.size();
  if (Count == 0) return;

  for (unsigned i = 1; i != unsigned(Count); ++i)
    assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif

/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
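  // The cached per-block results are paired with a flag that records whether
  // any of the entries are dirty and need to be recomputed.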
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc.  In
  /// the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
         I != E; ++I)
      if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

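  // Track the blocks we have already handled so each dirty block is only
  // processed once.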
  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't have
    // to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall,ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it
      // is unknown, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }

  return Cache;
}

/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(Instruction *QueryInst,
                             SmallVectorImpl<NonLocalDepResult> &Result) {

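  // Map the query instruction to the memory location it reads or writes.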
  auto getLocation = [](AliasAnalysis *AA, Instruction *Inst) {
    if (auto *I = dyn_cast<LoadInst>(Inst))
      return AA->getLocation(I);
    else if (auto *I = dyn_cast<StoreInst>(Inst))
      return AA->getLocation(I);
    else if (auto *I = dyn_cast<VAArgInst>(Inst))
      return AA->getLocation(I);
    else if (auto *I = dyn_cast<AtomicCmpXchgInst>(Inst))
      return AA->getLocation(I);
    else if (auto *I = dyn_cast<AtomicRMWInst>(Inst))
      return AA->getLocation(I);
    else
      llvm_unreachable("unsupported memory instruction");
  };

  const AliasAnalysis::Location Loc = getLocation(AA, QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.
Philip Reamesa7ad6a52015-01-26 18:54:27 +0000906
Philip Reames567feb92015-01-09 00:04:22 +0000907 // We currently give up on any instruction which is ordered, but we do handle
908 // atomic instructions which are unordered.
909 // TODO: Handle ordered instructions
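  // Illustrative note (editorial): an atomic load or store marked 'unordered'
  // is still analyzed by the code below, while one marked 'monotonic' or
  // stronger takes the early bailout just after the lambda and is reported as
  // an unknown dependence.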
910 auto isOrdered = [](Instruction *Inst) {
911 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
912 return !LI->isUnordered();
913 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
914 return !SI->isUnordered();
915 }
916 return false;
917 };
Philip Reames33d7f9d2015-01-09 00:26:45 +0000918 if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
919 Result.push_back(NonLocalDepResult(FromBB,
920 MemDepResult::getUnknown(),
921 const_cast<Value *>(Loc.Ptr)));
922 return;
923 }
Mehdi Aminia28d91d2015-03-10 02:37:25 +0000924 const DataLayout &DL = FromBB->getModule()->getDataLayout();
Chandler Carruth66b31302015-01-04 12:03:27 +0000925 PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, AC);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000926
Chris Lattnerff9f3db2008-12-15 03:35:32 +0000927 // This is the set of blocks we've inspected, and the pointer we consider in
928 // each block. Because of critical edges, we currently bail out if querying
929 // a block with multiple different pointers. This can happen during PHI
930 // translation.
931 DenseMap<BasicBlock*, Value*> Visited;
Philip Reames32351452015-01-26 18:39:52 +0000932 if (!getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
Chris Lattnerff9f3db2008-12-15 03:35:32 +0000933 Result, Visited, true))
934 return;
Chris Lattner7ed5ccc2008-12-15 04:58:29 +0000935 Result.clear();
Chris Lattner9b7d99e2009-12-22 04:25:02 +0000936 Result.push_back(NonLocalDepResult(FromBB,
Eli Friedman7d58bc72011-06-15 00:47:34 +0000937 MemDepResult::getUnknown(),
Dan Gohman23483932010-09-22 21:41:02 +0000938 const_cast<Value *>(Loc.Ptr)));
Chris Lattner7564a3b2008-12-07 02:56:57 +0000939}
940
Chris Lattnerf903fe12008-12-09 07:47:11 +0000941/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
942/// Pointer/PointeeSize using either cached information in Cache or by doing a
943/// lookup (which may use dirty cache info if available). If we do a lookup,
944/// add the result to the cache.
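/// (Editorial note: only the first NumSortedEntries elements of Cache are
/// guaranteed to be sorted, so the binary search below is restricted to that
/// prefix; entries appended here are merged into sorted order later by
/// SortNonLocalDepInfoCache.)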
945MemDepResult MemoryDependenceAnalysis::
Philip Reames32351452015-01-26 18:39:52 +0000946GetNonLocalInfoForBlock(Instruction *QueryInst,
947 const AliasAnalysis::Location &Loc,
Chris Lattnerf903fe12008-12-09 07:47:11 +0000948 bool isLoad, BasicBlock *BB,
949 NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000950
Chris Lattnerf903fe12008-12-09 07:47:11 +0000951 // Do a binary search to see if we already have an entry for this block in
952 // the cache set. If so, find it.
953 NonLocalDepInfo::iterator Entry =
954 std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
Chris Lattnereea0f582009-12-09 07:31:04 +0000955 NonLocalDepEntry(BB));
Chris Lattner0c315472009-12-09 07:08:01 +0000956 if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
Chris Lattnerf903fe12008-12-09 07:47:11 +0000957 --Entry;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000958
Craig Topper9f008862014-04-15 04:59:12 +0000959 NonLocalDepEntry *ExistingResult = nullptr;
Chris Lattner0c315472009-12-09 07:08:01 +0000960 if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
961 ExistingResult = &*Entry;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000962
Chris Lattnerf903fe12008-12-09 07:47:11 +0000963 // If we have a cached entry, and it is non-dirty, use it as the value for
964 // this dependency.
Chris Lattner0c315472009-12-09 07:08:01 +0000965 if (ExistingResult && !ExistingResult->getResult().isDirty()) {
Chris Lattnerf903fe12008-12-09 07:47:11 +0000966 ++NumCacheNonLocalPtr;
Chris Lattner0c315472009-12-09 07:08:01 +0000967 return ExistingResult->getResult();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000968 }
969
Chris Lattnerf903fe12008-12-09 07:47:11 +0000970 // Otherwise, we have to scan for the value. If we have a dirty cache
971 // entry, start scanning from its position, otherwise we scan from the end
972 // of the block.
973 BasicBlock::iterator ScanPos = BB->end();
Chris Lattner0c315472009-12-09 07:08:01 +0000974 if (ExistingResult && ExistingResult->getResult().getInst()) {
975 assert(ExistingResult->getResult().getInst()->getParent() == BB &&
Chris Lattnerf903fe12008-12-09 07:47:11 +0000976 "Instruction invalidated?");
977 ++NumCacheDirtyNonLocalPtr;
Chris Lattner0c315472009-12-09 07:08:01 +0000978 ScanPos = ExistingResult->getResult().getInst();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000979
Chris Lattnerf903fe12008-12-09 07:47:11 +0000980 // Eliminating the dirty entry from 'Cache', so update the reverse info.
Dan Gohman23483932010-09-22 21:41:02 +0000981 ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
Chris Lattner8eda11b2009-03-29 00:24:04 +0000982 RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
Chris Lattnerf903fe12008-12-09 07:47:11 +0000983 } else {
984 ++NumUncacheNonLocalPtr;
985 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000986
Chris Lattnerf903fe12008-12-09 07:47:11 +0000987 // Scan the block for the dependency.
Philip Reames32351452015-01-26 18:39:52 +0000988 MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB,
989 QueryInst);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000990
Chris Lattnerf903fe12008-12-09 07:47:11 +0000991 // If we had a dirty entry for the block, update it. Otherwise, just add
992 // a new entry.
993 if (ExistingResult)
Chris Lattner9b7d99e2009-12-22 04:25:02 +0000994 ExistingResult->setResult(Dep);
Chris Lattnerf903fe12008-12-09 07:47:11 +0000995 else
Chris Lattner9b7d99e2009-12-22 04:25:02 +0000996 Cache->push_back(NonLocalDepEntry(BB, Dep));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000997
Chris Lattnerf903fe12008-12-09 07:47:11 +0000998 // If the block has a dependency (i.e. it isn't completely transparent to
999 // the value), remember the reverse association because we just added it
1000 // to Cache!
Eli Friedmanc1702c82011-10-13 22:14:57 +00001001 if (!Dep.isDef() && !Dep.isClobber())
Chris Lattnerf903fe12008-12-09 07:47:11 +00001002 return Dep;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001003
Chris Lattnerf903fe12008-12-09 07:47:11 +00001004 // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
1005 // update MemDep when we remove instructions.
1006 Instruction *Inst = Dep.getInst();
1007 assert(Inst && "Didn't depend on anything?");
Dan Gohman23483932010-09-22 21:41:02 +00001008 ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
Chris Lattner8eda11b2009-03-29 00:24:04 +00001009 ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
Chris Lattnerf903fe12008-12-09 07:47:11 +00001010 return Dep;
1011}
1012
Robin Morisset039781e2014-08-29 21:53:01 +00001013/// SortNonLocalDepInfoCache - Sort the NonLocalDepInfo cache, given a certain
Chris Lattner370aada2009-07-13 17:20:05 +00001014/// number of elements in the array that are already properly ordered. This is
1015/// optimized for the case when only a few entries are added.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001016static void
Chris Lattner370aada2009-07-13 17:20:05 +00001017SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
1018 unsigned NumSortedEntries) {
1019 switch (Cache.size() - NumSortedEntries) {
1020 case 0:
1021 // done, no new entries.
1022 break;
1023 case 2: {
1024 // Two new entries, insert the last one into place.
Chris Lattner0c315472009-12-09 07:08:01 +00001025 NonLocalDepEntry Val = Cache.back();
Chris Lattner370aada2009-07-13 17:20:05 +00001026 Cache.pop_back();
1027 MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
1028 std::upper_bound(Cache.begin(), Cache.end()-1, Val);
1029 Cache.insert(Entry, Val);
1030 // FALL THROUGH.
1031 }
1032 case 1:
1033 // One new entry, Just insert the new value at the appropriate position.
1034 if (Cache.size() != 1) {
Chris Lattner0c315472009-12-09 07:08:01 +00001035 NonLocalDepEntry Val = Cache.back();
Chris Lattner370aada2009-07-13 17:20:05 +00001036 Cache.pop_back();
1037 MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
1038 std::upper_bound(Cache.begin(), Cache.end(), Val);
1039 Cache.insert(Entry, Val);
1040 }
1041 break;
1042 default:
1043 // Added many values, do a full-scale sort.
1044 std::sort(Cache.begin(), Cache.end());
1045 break;
1046 }
1047}
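// Design note (editorial): the one- and two-entry cases above insert each new
// entry into place with std::upper_bound, which is linear in the cache size
// and cheaper than the full std::sort fallback when only a few entries were
// appended since the last sort.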
1048
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001049/// getNonLocalPointerDepFromBB - Perform a dependency query based on
1050/// pointer/pointeesize starting at the end of StartBB. Add any clobber/def
1051/// results to the results vector and keep track of which blocks are visited in
1052/// 'Visited'.
1053///
1054/// This has special behavior for the first block queries (when SkipFirstBlock
1055/// is true). In this special case, it ignores the contents of the specified
1056/// block and starts returning dependence info for its predecessors.
1057///
1058/// This function returns false on success, or true to indicate that it could
1059/// not compute dependence information for some reason. This should be treated
1060/// as a clobber dependence on the first instruction in the predecessor block.
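///
/// A hedged sketch of the calling pattern used by getNonLocalPointerDependency
/// above (illustrative only; it reuses that caller's variable names):
///
/// \code
///   DenseMap<BasicBlock*, Value*> Visited;
///   if (!getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
///                                    Result, Visited, /*SkipFirstBlock=*/true))
///     return;            // Success: Result holds the def/clobber entries.
///   Result.clear();      // Failure: report one unknown dependence instead.
///   Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
///                                      const_cast<Value *>(Loc.Ptr)));
/// \endcode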
1061bool MemoryDependenceAnalysis::
Philip Reames32351452015-01-26 18:39:52 +00001062getNonLocalPointerDepFromBB(Instruction *QueryInst,
1063 const PHITransAddr &Pointer,
Dan Gohman23483932010-09-22 21:41:02 +00001064 const AliasAnalysis::Location &Loc,
Chris Lattnerf903fe12008-12-09 07:47:11 +00001065 bool isLoad, BasicBlock *StartBB,
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001066 SmallVectorImpl<NonLocalDepResult> &Result,
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001067 DenseMap<BasicBlock*, Value*> &Visited,
1068 bool SkipFirstBlock) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001069 // Look up the cached info for Pointer.
Chris Lattner972e6d82009-12-09 01:59:31 +00001070 ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
Dan Gohman23483932010-09-22 21:41:02 +00001071
Dan Gohman0a6021a2010-11-10 20:37:15 +00001072 // Set up a temporary NLPI value. If the map doesn't yet have an entry for
1073 // CacheKey, this value will be inserted as the associated value. Otherwise,
1074 // it'll be ignored, and we'll have to check to see if the cached size and
Hal Finkelcc39b672014-07-24 12:16:19 +00001075 // aa tags are consistent with the current query.
Dan Gohman0a6021a2010-11-10 20:37:15 +00001076 NonLocalPointerInfo InitialNLPI;
1077 InitialNLPI.Size = Loc.Size;
Hal Finkelcc39b672014-07-24 12:16:19 +00001078 InitialNLPI.AATags = Loc.AATags;
Dan Gohman0a6021a2010-11-10 20:37:15 +00001079
1080 // Get the NLPI for CacheKey, inserting one into the map if it doesn't
1081 // already have one.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001082 std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
Dan Gohman0a6021a2010-11-10 20:37:15 +00001083 NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
1084 NonLocalPointerInfo *CacheInfo = &Pair.first->second;
1085
Dan Gohman2e8ca442010-11-10 21:45:11 +00001086 // If we already have a cache entry for this CacheKey, we may need to do some
1087 // work to reconcile the cache entry and the current query.
Dan Gohman0a6021a2010-11-10 20:37:15 +00001088 if (!Pair.second) {
Dan Gohman2e8ca442010-11-10 21:45:11 +00001089 if (CacheInfo->Size < Loc.Size) {
1090 // The query's Size is greater than the cached one. Throw out the
Benjamin Kramerbde91762012-06-02 10:20:22 +00001091 // cached data and proceed with the query at the greater size.
Dan Gohman2e8ca442010-11-10 21:45:11 +00001092 CacheInfo->Pair = BBSkipFirstBlockPair();
1093 CacheInfo->Size = Loc.Size;
Dan Gohman67919362010-11-10 22:35:02 +00001094 for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
1095 DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
1096 if (Instruction *Inst = DI->getResult().getInst())
1097 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
Dan Gohman2e8ca442010-11-10 21:45:11 +00001098 CacheInfo->NonLocalDeps.clear();
1099 } else if (CacheInfo->Size > Loc.Size) {
1100 // This query's Size is less than the cached one. Conservatively restart
1101 // the query using the greater size.
Philip Reames32351452015-01-26 18:39:52 +00001102 return getNonLocalPointerDepFromBB(QueryInst, Pointer,
Dan Gohman0a6021a2010-11-10 20:37:15 +00001103 Loc.getWithNewSize(CacheInfo->Size),
1104 isLoad, StartBB, Result, Visited,
1105 SkipFirstBlock);
1106 }
1107
Hal Finkelcc39b672014-07-24 12:16:19 +00001108 // If the query's AATags are inconsistent with the cached one,
Dan Gohman2e8ca442010-11-10 21:45:11 +00001109 // conservatively throw out the cached data and restart the query with
1110 // no tag if needed.
Hal Finkelcc39b672014-07-24 12:16:19 +00001111 if (CacheInfo->AATags != Loc.AATags) {
1112 if (CacheInfo->AATags) {
Dan Gohman2e8ca442010-11-10 21:45:11 +00001113 CacheInfo->Pair = BBSkipFirstBlockPair();
Hal Finkelcc39b672014-07-24 12:16:19 +00001114 CacheInfo->AATags = AAMDNodes();
Dan Gohman67919362010-11-10 22:35:02 +00001115 for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
1116 DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
1117 if (Instruction *Inst = DI->getResult().getInst())
1118 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
Dan Gohman2e8ca442010-11-10 21:45:11 +00001119 CacheInfo->NonLocalDeps.clear();
1120 }
Hal Finkelcc39b672014-07-24 12:16:19 +00001121 if (Loc.AATags)
Philip Reames32351452015-01-26 18:39:52 +00001122 return getNonLocalPointerDepFromBB(QueryInst,
1123 Pointer, Loc.getWithoutAATags(),
Dan Gohman2e8ca442010-11-10 21:45:11 +00001124 isLoad, StartBB, Result, Visited,
1125 SkipFirstBlock);
Dan Gohman0a6021a2010-11-10 20:37:15 +00001126 }
Dan Gohman23483932010-09-22 21:41:02 +00001127 }
1128
1129 NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
Chris Lattner5ed409e2008-12-08 07:31:50 +00001130
1131 // If we have valid cached information for exactly the block we are
1132 // investigating, just return it with no recomputation.
Dan Gohman23483932010-09-22 21:41:02 +00001133 if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
Chris Lattner8b4be372008-12-16 07:10:09 +00001134 // We have a fully cached result for this query, so we can just return the
1135 // cached results and populate the visited set. However, we have to verify
1136 // that we don't already have conflicting results for these blocks. Check
1137 // to ensure that if a block in the results set is in the visited set that
1138 // it was for the same pointer query.
1139 if (!Visited.empty()) {
1140 for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
1141 I != E; ++I) {
Chris Lattner0c315472009-12-09 07:08:01 +00001142 DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
Chris Lattner972e6d82009-12-09 01:59:31 +00001143 if (VI == Visited.end() || VI->second == Pointer.getAddr())
1144 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001145
Chris Lattner8b4be372008-12-16 07:10:09 +00001146 // We have a pointer mismatch in a block. Just return clobber, saying
1147 // that something was clobbered in this result. We could also do a
1148 // non-fully cached query, but there is little point in doing this.
1149 return true;
1150 }
1151 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001152
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001153 Value *Addr = Pointer.getAddr();
Chris Lattner5ed409e2008-12-08 07:31:50 +00001154 for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
Chris Lattner8b4be372008-12-16 07:10:09 +00001155 I != E; ++I) {
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001156 Visited.insert(std::make_pair(I->getBB(), Addr));
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001157 if (I->getResult().isNonLocal()) {
1158 continue;
1159 }
1160
1161 if (!DT) {
1162 Result.push_back(NonLocalDepResult(I->getBB(),
1163 MemDepResult::getUnknown(),
1164 Addr));
1165 } else if (DT->isReachableFromEntry(I->getBB())) {
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001166 Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001167 }
Chris Lattner8b4be372008-12-16 07:10:09 +00001168 }
Chris Lattner5ed409e2008-12-08 07:31:50 +00001169 ++NumCacheCompleteNonLocalPtr;
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001170 return false;
Chris Lattner5ed409e2008-12-08 07:31:50 +00001171 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001172
Chris Lattner5ed409e2008-12-08 07:31:50 +00001173 // Otherwise, either this is a new block, a block with an invalid cache
1174 // pointer or one that we're about to invalidate by putting more info into it
1175 // than its valid cache info. If empty, the result will be valid cache info,
1176 // otherwise it isn't.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001177 if (Cache->empty())
Dan Gohman23483932010-09-22 21:41:02 +00001178 CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
Dan Gohmanc87c8432010-11-11 00:42:22 +00001179 else
Dan Gohman23483932010-09-22 21:41:02 +00001180 CacheInfo->Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001181
Chris Lattner5ed409e2008-12-08 07:31:50 +00001182 SmallVector<BasicBlock*, 32> Worklist;
1183 Worklist.push_back(StartBB);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001184
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001185 // PredList used inside loop.
1186 SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;
1187
Chris Lattnera28355d2008-12-07 08:50:20 +00001188 // Keep track of the entries that we know are sorted. Previously cached
1189 // entries will all be sorted. The entries we add we only sort on demand (we
1190 // don't insert every element into its sorted position). We know that we
1191 // won't get any reuse from currently inserted values, because we don't
1192 // revisit blocks after we insert info for them.
1193 unsigned NumSortedEntries = Cache->size();
Chris Lattnerf09619d2009-01-22 07:04:01 +00001194 DEBUG(AssertSorted(*Cache));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001195
Chris Lattner2faa2c72008-12-07 02:15:47 +00001196 while (!Worklist.empty()) {
Chris Lattner7564a3b2008-12-07 02:56:57 +00001197 BasicBlock *BB = Worklist.pop_back_val();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001198
Bruno Cardoso Lopese3c513a2014-10-01 20:07:13 +00001199 // If we end up processing a large number of blocks, the query becomes very
1200 // expensive and is likely not worth the effort, so bail out.
1201 if (Result.size() > NumResultsLimit) {
1202 Worklist.clear();
1203 // Sort it now (if needed) so that recursive invocations of
1204 // getNonLocalPointerDepFromBB and other routines that could reuse the
1205 // cache value will only see properly sorted cache arrays.
1206 if (Cache && NumSortedEntries != Cache->size()) {
1207 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
Bruno Cardoso Lopese3c513a2014-10-01 20:07:13 +00001208 }
1209 // Since we bail out, the "Cache" set won't contain all of the
1210 // results for the query. This is ok (we can still use it to accelerate
1211 // specific block queries) but we can't do the fastpath "return all
1212 // results from the set". Clear out the indicator for this.
1213 CacheInfo->Pair = BBSkipFirstBlockPair();
1214 return true;
1215 }
1216
Chris Lattner75510d82008-12-09 07:52:59 +00001217 // Skip the first block if we have it.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001218 if (!SkipFirstBlock) {
Chris Lattner75510d82008-12-09 07:52:59 +00001219 // Analyze the dependency of *Pointer in FromBB. See if we already have
1220 // been here.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001221 assert(Visited.count(BB) && "Should check 'visited' before adding to WL");
Chris Lattnera28355d2008-12-07 08:50:20 +00001222
Chris Lattner75510d82008-12-09 07:52:59 +00001223 // Get the dependency info for Pointer in BB. If we have cached
1224 // information, we will use it, otherwise we compute it.
Chris Lattnerf09619d2009-01-22 07:04:01 +00001225 DEBUG(AssertSorted(*Cache, NumSortedEntries));
Philip Reames32351452015-01-26 18:39:52 +00001226 MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst,
1227 Loc, isLoad, BB, Cache,
Chris Lattner972e6d82009-12-09 01:59:31 +00001228 NumSortedEntries);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001229
Chris Lattner75510d82008-12-09 07:52:59 +00001230 // If we got a Def or Clobber, add this to the list of results.
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001231 if (!Dep.isNonLocal()) {
1232 if (!DT) {
1233 Result.push_back(NonLocalDepResult(BB,
1234 MemDepResult::getUnknown(),
1235 Pointer.getAddr()));
1236 continue;
1237 } else if (DT->isReachableFromEntry(BB)) {
1238 Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
1239 continue;
1240 }
Chris Lattner75510d82008-12-09 07:52:59 +00001241 }
Chris Lattner2faa2c72008-12-07 02:15:47 +00001242 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001243
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001244 // If 'Pointer' is an instruction defined in this block, then we need to do
1245 // phi translation to change it into a value live in the predecessor block.
Chris Lattner972e6d82009-12-09 01:59:31 +00001246 // If not, we just add the predecessors to the worklist and scan them with
1247 // the same Pointer.
1248 if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001249 SkipFirstBlock = false;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001250 SmallVector<BasicBlock*, 16> NewBlocks;
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001251 for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
1252 // Verify that we haven't looked at this block yet.
1253 std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
Chris Lattner972e6d82009-12-09 01:59:31 +00001254 InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001255 if (InsertRes.second) {
1256 // First time we've looked at *PI.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001257 NewBlocks.push_back(*PI);
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001258 continue;
1259 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001260
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001261 // If we have seen this block before, but it was with a different
1262 // pointer, then we have a phi translation failure and we have to treat
1263 // this as a clobber.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001264 if (InsertRes.first->second != Pointer.getAddr()) {
1265 // Make sure to clean up the Visited map before continuing on to
1266 // PredTranslationFailure.
1267 for (unsigned i = 0; i < NewBlocks.size(); i++)
1268 Visited.erase(NewBlocks[i]);
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001269 goto PredTranslationFailure;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001270 }
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001271 }
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001272 Worklist.append(NewBlocks.begin(), NewBlocks.end());
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001273 continue;
1274 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001275
Chris Lattner972e6d82009-12-09 01:59:31 +00001276 // We do need to do phi translation. If we know ahead of time that we can't
1277 // phi translate this value, don't even try.
1278 if (!Pointer.IsPotentiallyPHITranslatable())
1279 goto PredTranslationFailure;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001280
Chris Lattner2f0c1c42009-07-13 17:14:23 +00001281 // We may have added values to the cache list before this PHI translation.
1282 // If so, we haven't done anything to ensure that the cache remains sorted.
1283 // Sort it now (if needed) so that recursive invocations of
1284 // getNonLocalPointerDepFromBB and other routines that could reuse the cache
1285 // value will only see properly sorted cache arrays.
1286 if (Cache && NumSortedEntries != Cache->size()) {
Chris Lattner370aada2009-07-13 17:20:05 +00001287 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
Chris Lattner2f0c1c42009-07-13 17:14:23 +00001288 NumSortedEntries = Cache->size();
1289 }
Craig Topper9f008862014-04-15 04:59:12 +00001290 Cache = nullptr;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001291
1292 PredList.clear();
Chris Lattnerac323292009-11-27 08:37:22 +00001293 for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
1294 BasicBlock *Pred = *PI;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001295 PredList.push_back(std::make_pair(Pred, Pointer));
1296
Chris Lattner972e6d82009-12-09 01:59:31 +00001297 // Get the PHI translated pointer in this predecessor. This can fail if
1298 // not translatable, in which case getAddr() returns null.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001299 PHITransAddr &PredPointer = PredList.back().second;
Craig Topper9f008862014-04-15 04:59:12 +00001300 PredPointer.PHITranslateValue(BB, Pred, nullptr);
Chris Lattner972e6d82009-12-09 01:59:31 +00001301
1302 Value *PredPtrVal = PredPointer.getAddr();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001303
Chris Lattnerac323292009-11-27 08:37:22 +00001304 // Check to see if we have already visited this pred block with another
1305 // pointer. If so, we can't do this lookup. This failure can occur
1306 // with PHI translation when a critical edge exists and the PHI node in
1307 // the successor translates to a pointer value different than the
1308 // pointer the block was first analyzed with.
1309 std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
Chris Lattner972e6d82009-12-09 01:59:31 +00001310 InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001311
Chris Lattnerac323292009-11-27 08:37:22 +00001312 if (!InsertRes.second) {
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001313 // We found the pred; take it off the list of preds to visit.
1314 PredList.pop_back();
1315
Chris Lattnerac323292009-11-27 08:37:22 +00001316 // If the predecessor was visited with PredPtr, then we already did
1317 // the analysis and can ignore it.
Chris Lattner972e6d82009-12-09 01:59:31 +00001318 if (InsertRes.first->second == PredPtrVal)
Chris Lattnerac323292009-11-27 08:37:22 +00001319 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001320
Chris Lattnerac323292009-11-27 08:37:22 +00001321 // Otherwise, the block was previously analyzed with a different
1322 // pointer. We can't represent the result of this case, so we just
1323 // treat this as a phi translation failure.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001324
1325 // Make sure to clean up the Visited map before continuing on to
1326 // PredTranslationFailure.
Matt Arsenault2080ecd2013-03-29 18:48:42 +00001327 for (unsigned i = 0, n = PredList.size(); i < n; ++i)
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001328 Visited.erase(PredList[i].first);
1329
Chris Lattnerac323292009-11-27 08:37:22 +00001330 goto PredTranslationFailure;
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001331 }
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001332 }
1333
1334 // Actually process results here; this needs to be a separate loop to avoid
1335 // calling getNonLocalPointerDepFromBB for blocks we don't want to return
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001336 // any results for. (getNonLocalPointerDepFromBB will modify our
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001337 // data structures in ways the code after the PredTranslationFailure label
1338 // doesn't expect.)
Matt Arsenault2080ecd2013-03-29 18:48:42 +00001339 for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001340 BasicBlock *Pred = PredList[i].first;
1341 PHITransAddr &PredPointer = PredList[i].second;
1342 Value *PredPtrVal = PredPointer.getAddr();
1343
1344 bool CanTranslate = true;
Chris Lattner2be52e72009-11-27 22:05:15 +00001345 // If PHI translation was unable to find an available pointer in this
1346 // predecessor, then we have to assume that the pointer is clobbered in
1347 // that predecessor. We can still do PRE of the load, which would insert
1348 // a computation of the pointer in this predecessor.
Craig Topper9f008862014-04-15 04:59:12 +00001349 if (!PredPtrVal)
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001350 CanTranslate = false;
1351
1352 // FIXME: it is entirely possible that PHI translating will end up with
1353 // the same value. Consider PHI translating something like:
1354 // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
1355 // to recurse here, pedantically speaking.
1356
1357 // If getNonLocalPointerDepFromBB fails here, that means the cached
1358 // result conflicted with the Visited list; we have to conservatively
Eli Friedman7d58bc72011-06-15 00:47:34 +00001359 // assume it is unknown, but this also does not block PRE of the load.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001360 if (!CanTranslate ||
Philip Reames32351452015-01-26 18:39:52 +00001361 getNonLocalPointerDepFromBB(QueryInst, PredPointer,
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001362 Loc.getWithNewPtr(PredPtrVal),
1363 isLoad, Pred,
1364 Result, Visited)) {
Chris Lattner9c2053b2009-12-01 07:33:32 +00001365 // Add the entry to the Result list.
Eli Friedman7d58bc72011-06-15 00:47:34 +00001366 NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
Chris Lattner9c2053b2009-12-01 07:33:32 +00001367 Result.push_back(Entry);
1368
Chris Lattner25bf6f82009-12-19 21:29:22 +00001369 // Since we had a phi translation failure, the cache for CacheKey won't
1370 // include all of the entries that we need to immediately satisfy future
1371 // queries. Mark this in NonLocalPointerDeps by setting the
1372 // BBSkipFirstBlockPair pointer to null. This makes later reuse of the
1373 // cached value do more work, but it will not miss the phi trans failure.
Dan Gohman23483932010-09-22 21:41:02 +00001374 NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
1375 NLPI.Pair = BBSkipFirstBlockPair();
Chris Lattner2be52e72009-11-27 22:05:15 +00001376 continue;
Chris Lattner2be52e72009-11-27 22:05:15 +00001377 }
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001378 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001379
Chris Lattnerac323292009-11-27 08:37:22 +00001380 // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
1381 CacheInfo = &NonLocalPointerDeps[CacheKey];
Dan Gohman23483932010-09-22 21:41:02 +00001382 Cache = &CacheInfo->NonLocalDeps;
Chris Lattnerac323292009-11-27 08:37:22 +00001383 NumSortedEntries = Cache->size();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001384
Chris Lattnerac323292009-11-27 08:37:22 +00001385 // Since we did phi translation, the "Cache" set won't contain all of the
1386 // results for the query. This is ok (we can still use it to accelerate
1387 // specific block queries) but we can't do the fastpath "return all
1388 // results from the set". Clear out the indicator for this.
Dan Gohman23483932010-09-22 21:41:02 +00001389 CacheInfo->Pair = BBSkipFirstBlockPair();
Chris Lattnerac323292009-11-27 08:37:22 +00001390 SkipFirstBlock = false;
1391 continue;
Chris Lattnerc49f5ac2009-11-26 23:18:49 +00001392
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001393 PredTranslationFailure:
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001394 // The following code is "failure"; we can't produce a sane translation
1395 // for the given block. It assumes that we haven't modified any of
1396 // our data structures while processing the current block.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001397
Craig Topper9f008862014-04-15 04:59:12 +00001398 if (!Cache) {
Chris Lattner3f4591c2009-01-23 07:12:16 +00001399 // Refresh the CacheInfo/Cache pointer if it got invalidated.
1400 CacheInfo = &NonLocalPointerDeps[CacheKey];
Dan Gohman23483932010-09-22 21:41:02 +00001401 Cache = &CacheInfo->NonLocalDeps;
Chris Lattner3f4591c2009-01-23 07:12:16 +00001402 NumSortedEntries = Cache->size();
Chris Lattner3f4591c2009-01-23 07:12:16 +00001403 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001404
Chris Lattner25bf6f82009-12-19 21:29:22 +00001405 // Since we failed phi translation, the "Cache" set won't contain all of the
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001406 // results for the query. This is ok (we can still use it to accelerate
1407 // specific block queries) but we can't do the fastpath "return all
Chris Lattner25bf6f82009-12-19 21:29:22 +00001408 // results from the set". Clear out the indicator for this.
Dan Gohman23483932010-09-22 21:41:02 +00001409 CacheInfo->Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001410
Eli Friedman7d58bc72011-06-15 00:47:34 +00001411 // If *nothing* works, mark the pointer as unknown.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001412 //
1413 // If this is the magic first block, return this as a clobber of the whole
1414 // incoming value. Since we can't phi translate to one of the predecessors,
1415 // we have to bail out.
1416 if (SkipFirstBlock)
1417 return true;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001418
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001419 for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
1420 assert(I != Cache->rend() && "Didn't find current block??");
Chris Lattner0c315472009-12-09 07:08:01 +00001421 if (I->getBB() != BB)
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001422 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001423
Rafael Espindolaa4b2ee42014-12-01 02:55:24 +00001424 assert((I->getResult().isNonLocal() || !DT->isReachableFromEntry(BB)) &&
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001425 "Should only be here with transparent block");
Eli Friedman7d58bc72011-06-15 00:47:34 +00001426 I->setResult(MemDepResult::getUnknown());
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001427 Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
1428 Pointer.getAddr()));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001429 break;
Chris Lattner7564a3b2008-12-07 02:56:57 +00001430 }
Chris Lattner2faa2c72008-12-07 02:15:47 +00001431 }
Chris Lattner3f4591c2009-01-23 07:12:16 +00001432
Chris Lattnerf903fe12008-12-09 07:47:11 +00001433 // Okay, we're done now. If we added new values to the cache, re-sort it.
Chris Lattner370aada2009-07-13 17:20:05 +00001434 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
Chris Lattnerf09619d2009-01-22 07:04:01 +00001435 DEBUG(AssertSorted(*Cache));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001436 return false;
Chris Lattnera28355d2008-12-07 08:50:20 +00001437}
1438
1439/// RemoveCachedNonLocalPointerDependencies - If P exists in
1440/// CachedNonLocalPointerInfo, remove it.
1441void MemoryDependenceAnalysis::
1442RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001443 CachedNonLocalPointerInfo::iterator It =
Chris Lattnera28355d2008-12-07 08:50:20 +00001444 NonLocalPointerDeps.find(P);
1445 if (It == NonLocalPointerDeps.end()) return;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001446
Chris Lattnera28355d2008-12-07 08:50:20 +00001447 // Remove all of the entries in the BB->val map. This involves removing
1448 // instructions from the reverse map.
Dan Gohman23483932010-09-22 21:41:02 +00001449 NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001450
Chris Lattnera28355d2008-12-07 08:50:20 +00001451 for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
Chris Lattner0c315472009-12-09 07:08:01 +00001452 Instruction *Target = PInfo[i].getResult().getInst();
Craig Topper9f008862014-04-15 04:59:12 +00001453 if (!Target) continue; // Ignore non-local dep results.
Chris Lattner0c315472009-12-09 07:08:01 +00001454 assert(Target->getParent() == PInfo[i].getBB());
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001455
Chris Lattnera28355d2008-12-07 08:50:20 +00001456 // Eliminating the dirty entry from 'Cache', so update the reverse info.
Chris Lattner8eda11b2009-03-29 00:24:04 +00001457 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
Chris Lattnera28355d2008-12-07 08:50:20 +00001458 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001459
Chris Lattnera28355d2008-12-07 08:50:20 +00001460 // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
1461 NonLocalPointerDeps.erase(It);
Chris Lattner2faa2c72008-12-07 02:15:47 +00001462}
1463
1464
Chris Lattnerfa9f99a2008-12-09 22:06:23 +00001465/// invalidateCachedPointerInfo - This method is used to invalidate cached
1466/// information about the specified pointer, because it may be too
1467/// conservative in memdep. This is an optional call that can be used when
1468/// the client detects an equivalence between the pointer and some other
1469/// value and replaces the other value with ptr. This can make Ptr available
1470/// in more places than the cached info would otherwise indicate.
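///
/// A hedged example (editorial; "MD", "Ptr", and "Other" are hypothetical
/// names): a client that proves Other == Ptr and rewrites Other might do:
///
/// \code
///   Other->replaceAllUsesWith(Ptr);
///   MD.invalidateCachedPointerInfo(Ptr);
/// \endcode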
1471void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
1472 // If Ptr isn't really a pointer, just ignore it.
Duncan Sands19d0b472010-02-16 11:11:14 +00001473 if (!Ptr->getType()->isPointerTy()) return;
Chris Lattnerfa9f99a2008-12-09 22:06:23 +00001474 // Flush store info for the pointer.
1475 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
1476 // Flush load info for the pointer.
1477 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
1478}
1479
Bob Wilson92cdb6e2010-02-16 19:51:59 +00001480/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
1481/// This needs to be done when the CFG changes, e.g., due to splitting
1482/// critical edges.
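///
/// For example (hypothetical client code), a pass that splits a critical edge
/// and wants to keep using this analysis afterwards would invoke:
///   MD.invalidateCachedPredecessors();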
1483void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
1484 PredCache->clear();
1485}
1486
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001487/// removeInstruction - Remove an instruction from the dependence analysis,
1488/// updating the dependence of instructions that previously depended on it.
Owen Anderson2b21c3c2007-08-08 22:26:03 +00001489/// This method attempts to keep the cache coherent using the reverse map.
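///
/// A minimal sketch of typical client usage (editorial illustration; "MD" and
/// "DeadInst" are hypothetical names):
///
/// \code
///   MD.removeInstruction(DeadInst);
///   DeadInst->eraseFromParent();
/// \endcode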
Chris Lattnera25d39522008-11-28 22:04:47 +00001490void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
Chris Lattnera25d39522008-11-28 22:04:47 +00001491 // Walk through the Non-local dependencies, removing this one as the value
1492 // for any cached queries.
Chris Lattner1b810bd2008-11-30 02:28:25 +00001493 NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
1494 if (NLDI != NonLocalDeps.end()) {
Chris Lattner7e61daf2008-12-01 01:15:42 +00001495 NonLocalDepInfo &BlockMap = NLDI->second.first;
Chris Lattnerfc678e22008-11-30 02:30:50 +00001496 for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
1497 DI != DE; ++DI)
Chris Lattner0c315472009-12-09 07:08:01 +00001498 if (Instruction *Inst = DI->getResult().getInst())
Chris Lattnerde4440c2008-12-07 18:39:13 +00001499 RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
Chris Lattner1b810bd2008-11-30 02:28:25 +00001500 NonLocalDeps.erase(NLDI);
1501 }
Owen Anderson086b2c42007-12-08 01:37:09 +00001502
Chris Lattnera25d39522008-11-28 22:04:47 +00001503 // If we have a cached local dependence query for this instruction, remove it.
Chris Lattner73c25452008-11-28 22:28:27 +00001504 //
Chris Lattnerde04e112008-11-29 01:43:36 +00001505 LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
1506 if (LocalDepEntry != LocalDeps.end()) {
Chris Lattnerada1f872008-11-30 01:09:30 +00001507 // Remove us from DepInst's reverse set now that the local dep info is gone.
Chris Lattnerde4440c2008-12-07 18:39:13 +00001508 if (Instruction *Inst = LocalDepEntry->second.getInst())
1509 RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
Chris Lattnerada1f872008-11-30 01:09:30 +00001510
Chris Lattner73c25452008-11-28 22:28:27 +00001511 // Remove this local dependency info.
Chris Lattnerde04e112008-11-29 01:43:36 +00001512 LocalDeps.erase(LocalDepEntry);
Chris Lattnera28355d2008-12-07 08:50:20 +00001513 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001514
Chris Lattnera28355d2008-12-07 08:50:20 +00001515 // If we have any cached pointer dependencies on this instruction, remove
1516 // them. If the instruction has non-pointer type, then it can't be a pointer
1517 // base.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001518
Chris Lattnera28355d2008-12-07 08:50:20 +00001519 // Remove it from both the load info and the store info. The instruction
1520 // can't be in either of these maps if it is non-pointer.
Duncan Sands19d0b472010-02-16 11:11:14 +00001521 if (RemInst->getType()->isPointerTy()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001522 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
1523 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
1524 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001525
Chris Lattnerd3d91112008-11-28 22:51:08 +00001526 // Loop over all of the things that depend on the instruction we're removing.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001527 //
Chris Lattner63bd5862008-11-29 23:30:39 +00001528 SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;
Chris Lattner82b70342008-12-07 18:42:51 +00001529
1530 // If we find RemInst as a clobber or Def in any of the maps for other values,
1531 // we need to replace its entry with a dirty version of the instruction after
1532 // it. If RemInst is a terminator, we use a null dirty value.
1533 //
1534 // Using a dirty version of the instruction after RemInst saves having to scan
1535 // the entire block to get to this point.
1536 MemDepResult NewDirtyVal;
1537 if (!RemInst->isTerminator())
1538 NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001539
Chris Lattner9f1988ab2008-11-29 09:20:15 +00001540 ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
1541 if (ReverseDepIt != ReverseLocalDeps.end()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001542 // RemInst can't be the terminator if it has local stuff depending on it.
Craig Topper46276792014-08-24 23:23:06 +00001543 assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
Chris Lattnerada1f872008-11-30 01:09:30 +00001544 "Nothing can locally depend on a terminator");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001545
Craig Topper46276792014-08-24 23:23:06 +00001546 for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
Chris Lattner1b810bd2008-11-30 02:28:25 +00001547 assert(InstDependingOnRemInst != RemInst &&
1548 "Already removed our local dep info");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001549
Chris Lattner82b70342008-12-07 18:42:51 +00001550 LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001551
Chris Lattnerada1f872008-11-30 01:09:30 +00001552 // Make sure to remember that new things depend on NewDepInst.
Chris Lattner82b70342008-12-07 18:42:51 +00001553 assert(NewDirtyVal.getInst() && "There is no way something else can have "
1554 "a local dep on this if it is a terminator!");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001555 ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
Chris Lattnerada1f872008-11-30 01:09:30 +00001556 InstDependingOnRemInst));
Chris Lattnerd3d91112008-11-28 22:51:08 +00001557 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001558
Chris Lattner63bd5862008-11-29 23:30:39 +00001559 ReverseLocalDeps.erase(ReverseDepIt);
1560
1561 // Add new reverse deps after scanning the set, to avoid invalidating the
1562 // 'ReverseDeps' reference.
1563 while (!ReverseDepsToAdd.empty()) {
1564 ReverseLocalDeps[ReverseDepsToAdd.back().first]
1565 .insert(ReverseDepsToAdd.back().second);
1566 ReverseDepsToAdd.pop_back();
1567 }
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001568 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001569
Chris Lattner9f1988ab2008-11-29 09:20:15 +00001570 ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
1571 if (ReverseDepIt != ReverseNonLocalDeps.end()) {
Craig Topper46276792014-08-24 23:23:06 +00001572 for (Instruction *I : ReverseDepIt->second) {
1573 assert(I != RemInst && "Already removed NonLocalDep info for RemInst");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001574
Craig Topper46276792014-08-24 23:23:06 +00001575 PerInstNLInfo &INLD = NonLocalDeps[I];
Chris Lattner44104272008-11-30 02:52:26 +00001576 // The information is now dirty!
Chris Lattner7e61daf2008-12-01 01:15:42 +00001577 INLD.second = true;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001578
1579 for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
Chris Lattner7e61daf2008-12-01 01:15:42 +00001580 DE = INLD.first.end(); DI != DE; ++DI) {
Chris Lattner0c315472009-12-09 07:08:01 +00001581 if (DI->getResult().getInst() != RemInst) continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001582
Chris Lattner1b810bd2008-11-30 02:28:25 +00001583 // Convert to a dirty entry for the subsequent instruction.
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001584 DI->setResult(NewDirtyVal);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001585
Chris Lattner82b70342008-12-07 18:42:51 +00001586 if (Instruction *NextI = NewDirtyVal.getInst())
Craig Topper46276792014-08-24 23:23:06 +00001587 ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
Chris Lattner1b810bd2008-11-30 02:28:25 +00001588 }
1589 }
Chris Lattner63bd5862008-11-29 23:30:39 +00001590
1591 ReverseNonLocalDeps.erase(ReverseDepIt);
1592
Chris Lattnere7d7e132008-11-29 22:02:15 +00001593 // Add new reverse deps after scanning the set, to avoid invalidating it.
1594 while (!ReverseDepsToAdd.empty()) {
1595 ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
1596 .insert(ReverseDepsToAdd.back().second);
1597 ReverseDepsToAdd.pop_back();
1598 }
Owen Anderson5f208be2007-08-16 21:27:05 +00001599 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001600
Chris Lattnera28355d2008-12-07 08:50:20 +00001601 // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
1602 // value in the NonLocalPointerDeps info.
1603 ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
1604 ReverseNonLocalPtrDeps.find(RemInst);
1605 if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001606 SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001607
Craig Topper46276792014-08-24 23:23:06 +00001608 for (ValueIsLoadPair P : ReversePtrDepIt->second) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001609 assert(P.getPointer() != RemInst &&
1610 "Already removed NonLocalPointerDeps info for RemInst");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001611
Dan Gohman23483932010-09-22 21:41:02 +00001612 NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001613
Chris Lattner5ed409e2008-12-08 07:31:50 +00001614 // The cache is not valid for any specific block anymore.
Dan Gohman23483932010-09-22 21:41:02 +00001615 NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001616
Chris Lattnera28355d2008-12-07 08:50:20 +00001617 // Update any entries for RemInst to use the instruction after it.
1618 for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
1619 DI != DE; ++DI) {
Chris Lattner0c315472009-12-09 07:08:01 +00001620 if (DI->getResult().getInst() != RemInst) continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001621
Chris Lattnera28355d2008-12-07 08:50:20 +00001622 // Convert to a dirty entry for the subsequent instruction.
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001623 DI->setResult(NewDirtyVal);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001624
Chris Lattnera28355d2008-12-07 08:50:20 +00001625 if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
1626 ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
1627 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001628
Chris Lattner3f4591c2009-01-23 07:12:16 +00001629 // Re-sort the NonLocalDepInfo. Changing the dirty entry to its
1630 // subsequent value may invalidate the sortedness.
1631 std::sort(NLPDI.begin(), NLPDI.end());
Chris Lattnera28355d2008-12-07 08:50:20 +00001632 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001633
Chris Lattnera28355d2008-12-07 08:50:20 +00001634 ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001635
Chris Lattnera28355d2008-12-07 08:50:20 +00001636 while (!ReversePtrDepsToAdd.empty()) {
1637 ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
Chris Lattner8eda11b2009-03-29 00:24:04 +00001638 .insert(ReversePtrDepsToAdd.back().second);
Chris Lattnera28355d2008-12-07 08:50:20 +00001639 ReversePtrDepsToAdd.pop_back();
1640 }
1641 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001642
1643
Chris Lattner1b810bd2008-11-30 02:28:25 +00001644 assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
Chris Lattner13cae612008-11-30 19:24:31 +00001645 AA->deleteValue(RemInst);
Jakob Stoklund Olesen087f2072011-01-11 04:05:39 +00001646 DEBUG(verifyRemoved(RemInst));
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001647}
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001648/// verifyRemoved - Verify that the specified instruction does not occur
Craig Topper46276792014-08-24 23:23:06 +00001649/// in our internal data structures. It verifies this by asserting in
1650/// debug builds.
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001651void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
Craig Topper46276792014-08-24 23:23:06 +00001652#ifndef NDEBUG
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001653 for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
1654 E = LocalDeps.end(); I != E; ++I) {
1655 assert(I->first != D && "Inst occurs in data structures");
Chris Lattner47e81d02008-11-30 23:17:19 +00001656 assert(I->second.getInst() != D &&
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001657 "Inst occurs in data structures");
1658 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001659
Chris Lattnera28355d2008-12-07 08:50:20 +00001660 for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
1661 E = NonLocalPointerDeps.end(); I != E; ++I) {
1662 assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
Dan Gohman23483932010-09-22 21:41:02 +00001663 const NonLocalDepInfo &Val = I->second.NonLocalDeps;
Chris Lattnera28355d2008-12-07 08:50:20 +00001664 for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
1665 II != E; ++II)
Chris Lattner0c315472009-12-09 07:08:01 +00001666 assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
Chris Lattnera28355d2008-12-07 08:50:20 +00001667 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001668
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001669 for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
1670 E = NonLocalDeps.end(); I != E; ++I) {
1671 assert(I->first != D && "Inst occurs in data structures");
Chris Lattner44104272008-11-30 02:52:26 +00001672 const PerInstNLInfo &INLD = I->second;
Chris Lattner7e61daf2008-12-01 01:15:42 +00001673 for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
1674 EE = INLD.first.end(); II != EE; ++II)
Chris Lattner0c315472009-12-09 07:08:01 +00001675 assert(II->getResult().getInst() != D && "Inst occurs in data structures");
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001676 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001677
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001678 for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
Chris Lattner1b810bd2008-11-30 02:28:25 +00001679 E = ReverseLocalDeps.end(); I != E; ++I) {
1680 assert(I->first != D && "Inst occurs in data structures");
Craig Topper46276792014-08-24 23:23:06 +00001681 for (Instruction *Inst : I->second)
1682 assert(Inst != D && "Inst occurs in data structures");
Chris Lattner1b810bd2008-11-30 02:28:25 +00001683 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001684
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001685 for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
1686 E = ReverseNonLocalDeps.end();
Chris Lattner1b810bd2008-11-30 02:28:25 +00001687 I != E; ++I) {
1688 assert(I->first != D && "Inst occurs in data structures");
Craig Topper46276792014-08-24 23:23:06 +00001689 for (Instruction *Inst : I->second)
1690 assert(Inst != D && "Inst occurs in data structures");
Chris Lattner1b810bd2008-11-30 02:28:25 +00001691 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001692
Chris Lattnera28355d2008-12-07 08:50:20 +00001693 for (ReverseNonLocalPtrDepTy::const_iterator
1694 I = ReverseNonLocalPtrDeps.begin(),
1695 E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
1696 assert(I->first != D && "Inst occurs in rev NLPD map");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001697
Craig Topper46276792014-08-24 23:23:06 +00001698 for (ValueIsLoadPair P : I->second)
1699 assert(P != ValueIsLoadPair(D, false) &&
1700 P != ValueIsLoadPair(D, true) &&
Chris Lattnera28355d2008-12-07 08:50:20 +00001701 "Inst occurs in ReverseNonLocalPtrDeps map");
1702 }
Craig Topper46276792014-08-24 23:23:06 +00001703#endif
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001704}