//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;

/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}
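
// For example (illustrative): if ReverseMap maps I -> {A, B}, then
// RemoveFromReverseMap(ReverseMap, I, A) shrinks the set to {B}, and a
// subsequent RemoveFromReverseMap(ReverseMap, I, B) erases I's entry entirely.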

/// If the given instruction references a specific memory location, fill in
/// Loc with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return MRI_Ref;
    }
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return MRI_ModRef;
    }
    Loc = MemoryLocation();
    return MRI_ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return MRI_Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return MRI_ModRef;
    }
    Loc = MemoryLocation();
    return MRI_ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return MRI_ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // Calls to free() deallocate the entire structure.
    Loc = MemoryLocation(CI->getArgOperand(0));
    return MRI_Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    AAMDNodes AAInfo;

    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      II->getAAMetadata(AAInfo);
      Loc = MemoryLocation(
          II->getArgOperand(1),
          cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return MRI_Mod;
    case Intrinsic::invariant_end:
      II->getAAMetadata(AAInfo);
      Loc = MemoryLocation(
          II->getArgOperand(2),
          cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return MRI_Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return MRI_ModRef;
  if (Inst->mayReadFromMemory())
    return MRI_Ref;
  return MRI_NoModRef;
}
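
// A minimal usage sketch (mirroring how getDependency below consumes this
// helper): callers pair the returned ModRefInfo with the filled-in location.
//
//   MemoryLocation Loc;
//   ModRefInfo MR = GetLocation(Inst, Loc, TLI);
//   if (Loc.Ptr) {
//     // Pointer-based query: scan for instructions that mod/ref Loc.
//   } else if (MR != MRI_NoModRef) {
//     // No specific location, but the instruction still touches memory.
//   }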

/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallSiteDependencyFrom(
    CallSite CS, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = &*--ScanIt;

    // If this inst is a memory op, get the pointer it accessed.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA.getModRefInfo(CS, Loc) != MRI_NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto InstCS = CallSite(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst))
        continue;
      // If these two calls do not interfere, look past it.
      switch (AA.getModRefInfo(CS, InstCS)) {
      case MRI_NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & MRI_Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        continue;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (MR != MRI_NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
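
// Illustrative example (hypothetical IR, assuming @pure is readonly): given
//
//   %a = call i32 @pure(i32 %x)
//   %b = call i32 @pure(i32 %x)
//
// a query on %b reports %a as a Def, since the calls are identical and do not
// write memory; a client such as GVN can then eliminate %b as redundant.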

unsigned MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
    const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
    const LoadInst *LI) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple())
    return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
    return 0;

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
      GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase)
    return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs)
    return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if
  // we're looking at an i8 load on x86-32 that is known 1024 byte aligned, we
  // can widen it up to an i32 load.  If it is known 2-byte aligned, we can
  // widen it to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs + MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs + LoadAlign < MemLocEnd)
    return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits() / 8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (true) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !DL.fitsInLegalInteger(NewLoadByteSize * 8))
      return 0;

    if (LIOffs + NewLoadByteSize > MemLocEnd &&
        LI->getParent()->getParent()->hasFnAttribute(
            Attribute::SanitizeAddress))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings.  So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs + NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}
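
// Worked example (illustrative): suppose LI is a 4-byte-aligned "load i8" of P
// (LIOffs = 0) and MemLoc is the single byte at P+3 (MemLocEnd = 4).  The
// candidate width starts at 2 bytes, which only covers [0, 2), so it doubles
// to 4; a 4-byte load covers [0, 4) and fits the alignment, so the function
// returns 4 and the i8 load can be widened to an i32.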

static bool isVolatile(Instruction *Inst) {
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return LI->isVolatile();
  else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return SI->isVolatile();
  else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return AI->isVolatile();
  return false;
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      MemDepResult InvariantGroupDependency =
          getInvariantGroupPointerDependency(LI, BB);

      if (InvariantGroupDependency.isDef())
        return InvariantGroupDependency;
    }
  }
  return getSimplePointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst,
                                        Limit);
}

MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                            BasicBlock *BB) {
  auto *InvariantGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group);
  if (!InvariantGroupMD)
    return MemDepResult::getUnknown();

  // Take the pointer operand after looking through all casts and zero GEPs.
  // This way we only need to search the cast graph downward.
  Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();

  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside of
  // the current function.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  // Queue to process all pointers that are equivalent to the load operand.
  SmallVector<const Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);
  while (!LoadOperandsQueue.empty()) {
    const Value *Ptr = LoadOperandsQueue.pop_back_val();
    assert(Ptr && !isa<GlobalValue>(Ptr) &&
           "Null or GlobalValue should not be inserted");

    for (const Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT.dominates(U, LI))
        continue;

      // A bitcast of Ptr is just another name for it.  Add it to the queue so
      // that its users are checked as well: U = bitcast Ptr.
      if (isa<BitCastInst>(U)) {
        LoadOperandsQueue.push_back(U);
        continue;
      }
      // A GEP with all-zero indices is equivalent to a bitcast.
      // FIXME: we are not sure if some bitcast should be canonicalized to gep 0
      // or gep 0 to bitcast because of SROA, so there are 2 forms.  Once
      // typeless pointers are ready, both cases will go away (and this BFS
      // also won't be needed).
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
        if (GEP->hasAllZeroIndices()) {
          LoadOperandsQueue.push_back(U);
          continue;
        }

      // If we hit a load/store with the same invariant.group metadata (and the
      // same pointer operand), we can assume that the value pointed to by the
      // pointer operand didn't change.
      if ((isa<LoadInst>(U) || isa<StoreInst>(U)) && U->getParent() == BB &&
          U->getMetadata(LLVMContext::MD_invariant_group) == InvariantGroupMD)
        return MemDepResult::getDef(U);
    }
  }
  return MemDepResult::getUnknown();
}
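
// Illustrative example (hypothetical IR): given
//
//   store i32 42, i32* %p, !invariant.group !0
//   ...
//   %v = load i32, i32* %p, !invariant.group !0
//
// in the same block, the load's dependency is reported as a Def on the store:
// the shared invariant.group metadata promises the pointed-to value has not
// changed between the two accesses, even through bitcasts or all-zero GEPs of
// %p.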

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  bool isInvariantLoad = false;

  if (!Limit) {
    unsigned DefaultLimit = BlockScanLimit;
    return getSimplePointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst,
                                          &DefaultLimit);
  }

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it.  We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42.  A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimization of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write.  We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
      isInvariantLoad = true;
  }

  const DataLayout &DL = BB->getModule()->getDataLayout();

  // Create a numbered basic block to lazily compute and cache instruction
  // positions inside a BB.  This is used to provide fast queries for relative
  // position between two instructions in a BB and can be used by
  // AliasAnalysis::callCapturesBefore.
  OrderedBasicBlock OBB(BB);

  // Return "true" if and only if the instruction I is either a non-simple
  // load or a non-simple store.
  auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(I))
      return !LI->isSimple();
    if (auto *SI = dyn_cast<StoreInst>(I))
      return !SI->isSimple();
    return false;
  };

  // Return "true" if I is not a load and not a store, but it does access
  // memory.
  auto isOtherMemAccess = [](Instruction *I) -> bool {
    return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --*Limit;
    if (!*Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that
    // it does not alias with when this atomic load indicates that another
    // thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // be safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (isVolatile(QueryInst))
          // Ordering required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA.alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == NoAlias)
          continue;

        // Must aliased loads are defs of each other.
        if (R == MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Random may-alias loads don't depend on each other without a
        // dependence.
        continue;
      }

      // Stores don't depend on other non-aliased accesses.
      if (R == NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA.pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);
        if (SI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(SI);
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can, for example,
      // be reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA.getModRefInfo(SI, MemLoc) == MRI_NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA.alias(StoreLoc, MemLoc);

      if (R == NoAlias)
        continue;
      if (R == MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.  Note that we can bypass the allocation itself when
    // looking for a clobber in many cases; that's an alias property and is
    // handled by BasicAA.
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
      if (AccessPtr == Inst || AA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence.  As a result, we look past it when finding a dependency for
    // loads.  DSE uses this to find preceding stores to delete and thus we
    // can't bypass the fence if the query instruction is a store.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

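    // Illustrative example (pseudo-IR) of the rule above: in
    //   store i32 0, i32* @g
    //   fence release
    //   %v = load i32, i32* @g
    // a load query may scan past the fence and find the store, while a store
    // query falls through below and treats the fence as a clobber.
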
    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (MR == MRI_ModRef)
      MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB);
    switch (MR) {
    case MRI_NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case MRI_Mod:
      return MemDepResult::getClobber(Inst);
    case MRI_Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & MRI_Mod);
      if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(
          MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA.onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(
          QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
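
// A minimal client sketch (assuming a pass holding a
// MemoryDependenceResults &MD):
//
//   MemDepResult Dep = MD.getDependency(Inst);
//   if (Dep.isDef())            // Dep.getInst() produces the queried value.
//     ...
//   else if (Dep.isClobber())   // Dep.getInst() may overwrite the location.
//     ...
//   else if (Dep.isNonLocal())  // The dependency lies in a predecessor;
//     ...                       // use the non-local query APIs below.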

#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed.  In the cached case,
  // this can happen due to instructions being deleted etc.  In the uncached
  // case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //      << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock *Pred : PredCache.get(QueryBB))
      DirtyBlocks.push_back(Pred);
    ++NumUncacheNonLocal;
  }

  // If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep =
          getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it
      // is unknown, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {
      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block.  Add them to our worklist.
      for (BasicBlock *Pred : PredCache.get(DirtyBB))
        DirtyBlocks.push_back(Pred);
    }
  }

  return Cache;
}

void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.

  // We currently give up on any instruction which is ordered, but we do handle
  // atomic instructions which are unordered.
  // TODO: Handle ordered instructions.
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      return !LI->isUnordered();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      return !SI->isUnordered();
    }
    return false;
  };
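  // For example (illustrative IR, an assumption rather than a case from this
  // file): an unordered atomic load such as
  //   %v = load atomic i32, i32* %p unordered, align 4
  // is still analyzed below, while a monotonic or stronger access such as
  //   %v = load atomic i32, i32* %p monotonic, align 4
  // makes isOrdered return true and the query conservatively answers
  // "unknown".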
  if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getModule()->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
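  // For example (illustrative, assumed IR): given
  //   bb1:   %p1 = getelementptr i32, i32* %base, i64 1
  //          br label %merge
  //   bb2:   %p2 = getelementptr i32, i32* %base, i64 2
  //          br label %merge
  //   merge: %p = phi i32* [ %p1, %bb1 ], [ %p2, %bb2 ]
  // a query on %p in %merge PHI-translates to %p1 and %p2 in the
  // predecessors, so each visited block is recorded with the pointer that was
  // actually queried there.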
  DenseMap<BasicBlock *, Value *> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}
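
// A typical caller sketch (illustrative; assumes the GVN-style usage pattern,
// not code from this file):
//   SmallVector<NonLocalDepResult, 64> Deps;
//   MD.getNonLocalPointerDependency(LI, Deps);
//   for (const NonLocalDepResult &D : Deps) { /* inspect D.getBB(), ... */ }
// Each result pairs a basic block with the def/clobber (or unknown) found
// there.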

/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty cache
/// info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceResults::GetNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
  // Do a binary search to see if we already have an entry for this block in
  // the cache set.  If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value.  If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep =
      getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);

  // If we had a dirty entry for the block, update it.  Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // Done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
    LLVM_FALLTHROUGH;
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
          std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}
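
// A small worked example (illustrative): if Cache holds sorted entries for
// blocks [A, C, D] with NumSortedEntries == 3 and two lookups append E and
// then B, the "case 2" path binary-searches B into place among [A, C, D] and
// then falls through to "case 1" to place E, avoiding a full O(n log n) sort
// for the common case of only one or two additions.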

/// Perform a dependency query based on pointer/pointee-size starting at the
/// end of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block query (when SkipFirstBlock
/// is true).  In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason.  Failure should be
/// treated as a clobber dependence on the first instruction in the predecessor
/// block.
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value.  If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value.  Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // aa tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size < Loc.Size) {
      // The query's Size is greater than the cached one.  Throw out the
      // cached data and proceed with the query at the greater size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (auto &Entry : CacheInfo->NonLocalDeps)
        if (Instruction *Inst = Entry.getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    } else if (CacheInfo->Size > Loc.Size) {
      // This query's Size is less than the cached one.  Conservatively restart
      // the query using the greater size.
      return getNonLocalPointerDepFromBB(
          QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
          StartBB, Result, Visited, SkipFirstBlock);
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB,
            Result, Visited, SkipFirstBlock);
    }
  }
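
  // Example of the reconciliation above (illustrative): if the cache was
  // built for a 4-byte query of some pointer and we now ask about 8 bytes,
  // the stale 4-byte entries are discarded and the query proceeds at 8 bytes;
  // conversely, a 4-byte query against an 8-byte cache restarts itself at the
  // cached 8-byte width, so both queries share one conservative answer.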

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // We have a fully cached result for this query, so we can just return the
    // cached results and populate the visited set.  However, we have to verify
    // that we don't already have conflicting results for these blocks.  Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block.  Just return false, saying
        // that something was clobbered in this result.  We could also do a
        // non-fully cached query, but there is little point in doing this.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal()) {
        continue;
      }

      if (DT.isReachableFromEntry(Entry.getBB())) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer, or one that we're about to invalidate by putting more info into
  // it than its valid cache info.  If empty, the result will be valid cache
  // info, otherwise it isn't.
  if (Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();

  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted.  Previously cached
  // entries will all be sorted.  The entries we add are only sorted on demand
  // (we don't insert every element into its sorted position).  We know that
  // we won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // If we end up processing a large number of blocks, it becomes very
    // expensive and likely isn't worth it, so bail out.
    if (Result.size() > NumResultsLimit) {
      Worklist.clear();
      // Sort it now (if needed) so that recursive invocations of
      // getNonLocalPointerDepFromBB and other routines that could reuse the
      // cache value will only see properly sorted cache arrays.
      if (Cache && NumSortedEntries != Cache->size()) {
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      }
      // Since we bail out, the "Cache" set won't contain all of the
      // results for the query.  This is ok (we can still use it to accelerate
      // specific block queries) but we can't do the fastpath "return all
      // results from the set".  Clear out the indicator for this.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB.  See if we have already
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB.  If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
                                                 Cache, NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock *, 16> NewBlocks;
      for (BasicBlock *Pred : PredCache.get(BB)) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at Pred.
          NewBlocks.push_back(Pred);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer, then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      if (NewBlocks.size() > WorklistEntries) {
        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < NewBlocks.size(); i++)
          Visited.erase(NewBlocks[i]);
        GotWorklistLimit = true;
        goto PredTranslationFailure;
      }
      WorklistEntries -= NewBlocks.size();
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation; if we know ahead of time that we can't
    // phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the cache
    // value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = nullptr;

    PredList.clear();
    for (BasicBlock *Pred : PredCache.get(BB)) {
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor.  This can fail if
      // not translatable, in which case getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer.  If so, we can't do this lookup.  This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer.  We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0, n = PredList.size(); i < n; ++i)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to avoid
    // calling getNonLocalPointerDepFromBB for blocks we don't want to return
    // any results for.  (getNonLocalPointerDepFromBB will modify our
    // data structures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
    for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor.  We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (!PredPtrVal)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value.  Consider PHI translating something like:
      // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy future
        // queries.  Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null.  This requires reuse of the
        // cached value to do more work but not miss the phi trans failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block.  It assumes that we haven't modified any of
    // our data structures while processing the current block.

    if (!Cache) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value.  Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return false;

    bool foundBlock = false;
    for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
      if (I.getBB() != BB)
        continue;

      assert((GotWorklistLimit || I.getResult().isNonLocal() ||
              !DT.isReachableFromEntry(BB)) &&
             "Should only be here with transparent block");
      foundBlock = true;
      I.setResult(MemDepResult::getUnknown());
      Result.push_back(
          NonLocalDepResult(I.getBB(), I.getResult(), Pointer.getAddr()));
      break;
    }
    (void)foundBlock; (void)GotWorklistLimit;
    assert((foundBlock || GotWorklistLimit) && "Current block not in cache?");
  }

  // Okay, we're done now.  If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return true;
}

/// If P exists in CachedNonLocalPointerInfo, remove it.
void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map.  This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy())
    return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}

void MemoryDependenceResults::invalidateCachedPredecessors() {
  PredCache.clear();
}

void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (auto &Entry : BlockMap)
      if (Instruction *Inst = Entry.getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a pointer
  // base.

  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }

  // Loop over all of the things that depend on the instruction we're removing.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it.  If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
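
  // For instance (illustrative): if RemInst is a store immediately followed by
  // some instruction %next, any query that previously resolved to RemInst is
  // rewritten as "dirty at %next", so a later re-query rescans only from
  // %next rather than from the end of the block.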

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDepInst.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[I];
      // The information is now dirty!
      INLD.second = true;

      for (auto &Entry : INLD.first) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // set we're iterating over.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
      ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
        ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (auto &Entry : NLPDI) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  DEBUG(verifyRemoved(RemInst));
}

/// Verify that the specified instruction does not occur in our internal data
/// structures.
///
/// This function verifies by asserting in debug builds.
void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
  for (const auto &DepKV : LocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : NonLocalPointerDeps) {
    assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
    for (const auto &Entry : DepKV.second.NonLocalDeps)
      assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (const auto &DepKV : NonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = DepKV.second;
    for (const auto &Entry : INLD.first)
      assert(Entry.getResult().getInst() != D &&
             "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalPtrDeps) {
    assert(DepKV.first != D && "Inst occurs in rev NLPD map");

    for (ValueIsLoadPair P : DepKV.second)
      assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
#endif
}

AnalysisKey MemoryDependenceAnalysis::Key;

MemoryDependenceResults
MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  return MemoryDependenceResults(AA, AC, TLI, DT);
}

char MemoryDependenceWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
  initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
}

MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() {}

void MemoryDependenceWrapperPass::releaseMemory() {
  MemDep.reset();
}

void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

bool MemoryDependenceResults::invalidate(Function &F, const PreservedAnalyses &PA,
                                         FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether our analysis is preserved.
  auto PAC = PA.getChecker<MemoryDependenceAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    // If not, give up now.
    return true;

  // Check whether the analyses we depend on became invalid for any reason.
  if (Inv.invalidate<AAManager>(F, PA) ||
      Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      Inv.invalidate<DominatorTreeAnalysis>(F, PA))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}
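
// Illustrative note (not code from this file): a transform that does not
// disturb memdep's inputs can keep this result alive across the new pass
// manager by marking it preserved, e.g.
//   PreservedAnalyses PA;
//   PA.preserve<MemoryDependenceAnalysis>();
//   return PA;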

unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
  return BlockScanLimit;
}

bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  MemDep.emplace(AA, AC, TLI, DT);
  return false;
}