//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on. It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.

static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

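// Limit for the number of blocks to scan during non-local memory dependency
// analysis.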
static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;

/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// If the given instruction references a specific memory location, fill in Loc
/// with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::Ref;
    }
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return ModRefInfo::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // calls to free() deallocate the entire structure
    Loc = MemoryLocation(CI->getArgOperand(0));
    return ModRefInfo::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::invariant_end:
      Loc = MemoryLocation::getForArgument(II, 2, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return ModRefInfo::ModRef;
  if (Inst->mayReadFromMemory())
    return ModRefInfo::Ref;
  return ModRefInfo::NoModRef;
}

/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallSiteDependencyFrom(
    CallSite CS, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;
    // Debug intrinsics don't cause dependences and should not affect Limit
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    // If this inst is a memory op, get the pointer it accessed
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (isModOrRefSet(AA.getModRefInfo(CS, Loc)))
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto InstCS = CallSite(Inst)) {
      // If these two calls do not interfere, look past it.
      if (isNoModRef(AA.getModRefInfo(CS, InstCS))) {
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !isModSet(MR) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        continue;
      } else
        return MemDepResult::getClobber(Inst);
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (isModOrRefSet(MR))
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

unsigned MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
    const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
    const LoadInst *LI) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple())
    return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
    return 0;

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
      GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase)
    return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias. This happens when we have things like two byte loads at "P+1"
  // and "P+3". Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs)
    return 0;

  // Get the alignment of the load in bytes. We assume that it is safe to load
  // any legal integer up to this size without a problem. For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load. If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs + MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs + LoadAlign < MemLocEnd)
    return 0;

  // This is the size of the load to try. Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits() / 8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (true) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !DL.fitsInLegalInteger(NewLoadByteSize * 8))
      return 0;

    if (LIOffs + NewLoadByteSize > MemLocEnd &&
        (LI->getParent()->getParent()->hasFnAttribute(
             Attribute::SanitizeAddress) ||
         LI->getParent()->getParent()->hasFnAttribute(
             Attribute::SanitizeHWAddress)))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings. So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs + NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}

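/// Return true if \p Inst is a volatile load, store, or atomic cmpxchg; any
/// other instruction is treated as non-volatile.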
static bool isVolatile(Instruction *Inst) {
  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return LI->isVolatile();
  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return SI->isVolatile();
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return AI->isVolatile();
  return false;
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);

      if (InvariantGroupDependency.isDef())
        return InvariantGroupDependency;
    }
  }
  MemDepResult SimpleDep = getSimplePointerDependencyFrom(
      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit);
  if (SimpleDep.isDef())
    return SimpleDep;
  // A non-local invariant group dependency indicates there is a non-local Def
  // (it only returns NonLocal if it finds a non-local Def), which is better
  // than a local clobber and everything else.
  if (InvariantGroupDependency.isNonLocal())
    return InvariantGroupDependency;

  assert(InvariantGroupDependency.isUnknown() &&
         "InvariantGroupDependency should be only unknown at this point");
  return SimpleDep;
}

MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                             BasicBlock *BB) {
  auto *InvariantGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group);
  if (!InvariantGroupMD)
    return MemDepResult::getUnknown();

  // Take the ptr operand after all casts and geps 0. This way we can search
  // cast graph down only.
  Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();

  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside
  // of the current function.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  // Queue to process all pointers that are equivalent to load operand.
  SmallVector<const Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);

  Instruction *ClosestDependency = nullptr;
  // The order of instructions in the uses list is unpredictable. In order to
  // always get the same result, we will look for the closest dominance.
  auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
    assert(Other && "Must call it with not null instruction");
    if (Best == nullptr || DT.dominates(Best, Other))
      return Other;
    return Best;
  };

  // FIXME: This loop is O(N^2) because dominates can be O(n) and in worst case
  // we will see all the instructions. This should be fixed in MSSA.
  while (!LoadOperandsQueue.empty()) {
    const Value *Ptr = LoadOperandsQueue.pop_back_val();
    assert(Ptr && !isa<GlobalValue>(Ptr) &&
           "Null or GlobalValue should not be inserted");

    for (const Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT.dominates(U, LI))
        continue;

      // A bitcast or a gep with all-zero indices is using Ptr. Add it to the
      // queue to check its users. U = bitcast Ptr
      if (isa<BitCastInst>(U)) {
        LoadOperandsQueue.push_back(U);
        continue;
      }
      // Gep with zeros is equivalent to bitcast.
      // FIXME: we are not sure if some bitcast should be canonicalized to
      // gep 0 or gep 0 to bitcast because of SROA, so there are 2 forms. When
      // typeless pointers are ready, both cases will be gone
      // (and this BFS also won't be needed).
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
        if (GEP->hasAllZeroIndices()) {
          LoadOperandsQueue.push_back(U);
          continue;
        }

      // If we hit a load/store with the same invariant.group metadata (and the
      // same pointer operand), we can assume that the value pointed to by the
      // pointer operand didn't change.
      if ((isa<LoadInst>(U) || isa<StoreInst>(U)) &&
          U->getMetadata(LLVMContext::MD_invariant_group) == InvariantGroupMD)
        ClosestDependency = GetClosestDependency(ClosestDependency, U);
    }
  }

  if (!ClosestDependency)
    return MemDepResult::getUnknown();
  if (ClosestDependency->getParent() == BB)
    return MemDepResult::getDef(ClosestDependency);
  // Def(U) can't be returned here because it is non-local. If a local
  // dependency is not found, return NonLocal, counting on the user to call
  // getNonLocalPointerDependency, which will return the cached result.
  NonLocalDefsCache.try_emplace(
      LI, NonLocalDepResult(ClosestDependency->getParent(),
                            MemDepResult::getDef(ClosestDependency), nullptr));
  return MemDepResult::getNonLocal();
}

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  bool isInvariantLoad = false;

  if (!Limit) {
    unsigned DefaultLimit = BlockScanLimit;
    return getSimplePointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst,
                                          &DefaultLimit);
  }

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimization of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
      isInvariantLoad = true;
  }

  const DataLayout &DL = BB->getModule()->getDataLayout();

  // Create a numbered basic block to lazily compute and cache instruction
  // positions inside a BB. This is used to provide fast queries for relative
  // position between two instructions in a BB and can be used by
  // AliasAnalysis::callCapturesBefore.
  OrderedBasicBlock OBB(BB);

  // Return "true" if and only if the instruction I is either a non-simple
  // load or a non-simple store.
  auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(I))
      return !LI->isSimple();
    if (auto *SI = dyn_cast<StoreInst>(I))
      return !SI->isSimple();
    return false;
  };

  // Return "true" if I is not a load and not a store, but it does access
  // memory.
  auto isOtherMemAccess = [](Instruction *I) -> bool {
    return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --*Limit;
    if (!*Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them. It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased. This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that
    // it does not alias with when this atomic load indicates that another
    // thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations; normal accesses, for example, can be
      // safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile
          return MemDepResult::getClobber(LI);
        if (isVolatile(QueryInst))
          // Ordering required if QueryInst is itself volatile
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA.alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == NoAlias)
          continue;

        // Must aliased loads are defs of each other.
        if (R == MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Random may-alias loads don't depend on each other without a
        // dependence.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA.pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);
        if (SI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(SI);
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can, for example,
      // be reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it. Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (!isModOrRefSet(AA.getModRefInfo(SI, MemLoc)))
        continue;

      // Ok, this store might clobber the query pointer. Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      // FIXME: Use ModRefInfo::Must bit from getModRefInfo call above.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA.alias(StoreLoc, MemLoc);

      if (R == NoAlias)
        continue;
      if (R == MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def. This means that there is no dependence and
    // the access can be optimized based on that. For example, a load could
    // turn into undef. Note that we can bypass the allocation itself when
    // looking for a clobber in many cases; that's an alias property and is
    // handled by BasicAA.
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
      if (AccessPtr == Inst || AA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence. As a result, we look past it when finding a dependency for
    // loads. DSE uses this to find preceding stores to delete and thus we
    // can't bypass the fence if the query instruction is a store.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (isModAndRefSet(MR))
      MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB);
    switch (clearMust(MR)) {
    case ModRefInfo::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case ModRefInfo::Mod:
      return MemDepResult::getClobber(Inst);
    case ModRefInfo::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
      LLVM_FALLTHROUGH;
    default:
      // Otherwise, there is a potential dependence. Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

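/// Return the instruction on which the memory operation \p QueryInst locally
/// depends, consulting and updating the LocalDeps cache; the result is
/// NonLocal (or NonFuncLocal for the entry block) when no dependency is found
/// within QueryInst's own block.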
MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it. Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !isModSet(MR);
      if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(
          MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA.onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(
          QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}

#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

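/// Compute or refresh the cached non-local dependency entries for a call
/// site whose local dependency is non-local, rescanning any blocks whose
/// cached results have become dirty.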
Chandler Carruth61440d22016-03-10 00:55:30 +0000780const MemoryDependenceResults::NonLocalDepInfo &
781MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
Chris Lattner254314e2008-12-09 19:38:05 +0000782 assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000783 "getNonLocalCallDependency should only be used on calls with "
784 "non-local deps!");
Chris Lattner254314e2008-12-09 19:38:05 +0000785 PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
Chris Lattner7e61daf2008-12-01 01:15:42 +0000786 NonLocalDepInfo &Cache = CacheP.first;
Chris Lattner20597532008-11-30 01:18:27 +0000787
Chandler Carruth40e21f22016-03-07 12:30:06 +0000788 // This is the set of blocks that need to be recomputed. In the cached case,
789 // this can happen due to instructions being deleted etc. In the uncached
790 // case, this starts out as the set of predecessors we care about.
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000791 SmallVector<BasicBlock *, 32> DirtyBlocks;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000792
Chris Lattner20597532008-11-30 01:18:27 +0000793 if (!Cache.empty()) {
Chris Lattner7e61daf2008-12-01 01:15:42 +0000794 // Okay, we have a cache entry. If we know it is not dirty, just return it
795 // with no computation.
796 if (!CacheP.second) {
Dan Gohmand2d1ae12010-06-22 15:08:57 +0000797 ++NumCacheNonLocal;
Chris Lattner7e61daf2008-12-01 01:15:42 +0000798 return Cache;
799 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000800
Chris Lattner20597532008-11-30 01:18:27 +0000801 // If we already have a partially computed set of results, scan them to
Chris Lattner7e61daf2008-12-01 01:15:42 +0000802 // determine what is dirty, seeding our initial DirtyBlocks worklist.
Chandler Carruthaf8321e2016-03-07 15:12:57 +0000803 for (auto &Entry : Cache)
804 if (Entry.getResult().isDirty())
805 DirtyBlocks.push_back(Entry.getBB());
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000806
Chris Lattner7e61daf2008-12-01 01:15:42 +0000807 // Sort the cache so that we can do fast binary search lookups below.
808 std::sort(Cache.begin(), Cache.end());
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000809
Chris Lattner7e61daf2008-12-01 01:15:42 +0000810 ++NumCacheDirtyNonLocal;
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000811 // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
Chris Lattner20597532008-11-30 01:18:27 +0000812 // << Cache.size() << " cached: " << *QueryInst;
813 } else {
814 // Seed DirtyBlocks with each of the preds of QueryInst's block.
Chris Lattner254314e2008-12-09 19:38:05 +0000815 BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
Daniel Berlinb4e7a4a2015-04-21 21:11:50 +0000816 for (BasicBlock *Pred : PredCache.get(QueryBB))
817 DirtyBlocks.push_back(Pred);
Dan Gohmand2d1ae12010-06-22 15:08:57 +0000818 ++NumUncacheNonLocal;
Chris Lattner20597532008-11-30 01:18:27 +0000819 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000820
Chris Lattner702e46e2008-12-09 21:19:42 +0000821 // isReadonlyCall - If this is a read-only call, we can be more aggressive.
Chandler Carruth61440d22016-03-10 00:55:30 +0000822 bool isReadonlyCall = AA.onlyReadsMemory(QueryCS);
Chris Lattnerff9f3db2008-12-15 03:35:32 +0000823
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000824 SmallPtrSet<BasicBlock *, 32> Visited;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000825
Chris Lattner7e61daf2008-12-01 01:15:42 +0000826 unsigned NumSortedEntries = Cache.size();
Chris Lattnerf09619d2009-01-22 07:04:01 +0000827 DEBUG(AssertSorted(Cache));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000828
Chris Lattner20597532008-11-30 01:18:27 +0000829 // Iterate while we still have blocks to update.
830 while (!DirtyBlocks.empty()) {
831 BasicBlock *DirtyBB = DirtyBlocks.back();
832 DirtyBlocks.pop_back();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000833
Chris Lattner7e61daf2008-12-01 01:15:42 +0000834 // Already processed this block?
David Blaikie70573dc2014-11-19 07:49:26 +0000835 if (!Visited.insert(DirtyBB).second)
Chris Lattner7e61daf2008-12-01 01:15:42 +0000836 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000837
Chris Lattner7e61daf2008-12-01 01:15:42 +0000838 // Do a binary search to see if we already have an entry for this block in
839 // the cache set. If so, find it.
Chris Lattnerf09619d2009-01-22 07:04:01 +0000840 DEBUG(AssertSorted(Cache, NumSortedEntries));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000841 NonLocalDepInfo::iterator Entry =
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000842 std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
843 NonLocalDepEntry(DirtyBB));
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +0000844 if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
Chris Lattner7e61daf2008-12-01 01:15:42 +0000845 --Entry;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000846
Craig Topper9f008862014-04-15 04:59:12 +0000847 NonLocalDepEntry *ExistingResult = nullptr;
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000848 if (Entry != Cache.begin() + NumSortedEntries &&
Chris Lattner0c315472009-12-09 07:08:01 +0000849 Entry->getBB() == DirtyBB) {
Chris Lattner7e61daf2008-12-01 01:15:42 +0000850 // If we already have an entry, and if it isn't already dirty, the block
851 // is done.
Chris Lattner0c315472009-12-09 07:08:01 +0000852 if (!Entry->getResult().isDirty())
Chris Lattner7e61daf2008-12-01 01:15:42 +0000853 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000854
Chris Lattner7e61daf2008-12-01 01:15:42 +0000855 // Otherwise, remember this slot so we can update the value.
Chris Lattner0c315472009-12-09 07:08:01 +0000856 ExistingResult = &*Entry;
Chris Lattner7e61daf2008-12-01 01:15:42 +0000857 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000858
Chris Lattner20597532008-11-30 01:18:27 +0000859 // If the dirty entry records an instruction, start scanning from it so we
860 // don't have to rescan the entire block.
861 BasicBlock::iterator ScanPos = DirtyBB->end();
Chris Lattner7e61daf2008-12-01 01:15:42 +0000862 if (ExistingResult) {
Chris Lattner0c315472009-12-09 07:08:01 +0000863 if (Instruction *Inst = ExistingResult->getResult().getInst()) {
Duncan P. N. Exon Smith5a82c912015-10-10 00:53:03 +0000864 ScanPos = Inst->getIterator();
Chris Lattner7e61daf2008-12-01 01:15:42 +0000865 // We're removing QueryInst's use of Inst.
Chris Lattner254314e2008-12-09 19:38:05 +0000866 RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
867 QueryCS.getInstruction());
Chris Lattner7e61daf2008-12-01 01:15:42 +0000868 }
Chris Lattner1b810bd2008-11-30 02:28:25 +0000869 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000870
Chris Lattner60444f82008-11-30 01:26:32 +0000871 // Find out if this block has a local dependency for QueryInst.
Chris Lattnered494f72008-12-07 01:21:14 +0000872 MemDepResult Dep;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000873
Chris Lattner254314e2008-12-09 19:38:05 +0000874 if (ScanPos != DirtyBB->begin()) {
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000875 Dep =
876 getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
Chris Lattner254314e2008-12-09 19:38:05 +0000877 } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
878 // No dependence found. If this is the entry block of the function, it is
Eli Friedman7d58bc72011-06-15 00:47:34 +0000879 // a clobber, otherwise it is unknown.
Chris Lattner254314e2008-12-09 19:38:05 +0000880 Dep = MemDepResult::getNonLocal();
Chris Lattner5a786042008-12-07 01:50:16 +0000881 } else {
Eli Friedmanc1702c82011-10-13 22:14:57 +0000882 Dep = MemDepResult::getNonFuncLocal();
Chris Lattner5a786042008-12-07 01:50:16 +0000883 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000884
Chris Lattner7e61daf2008-12-01 01:15:42 +0000885 // If we had a dirty entry for the block, update it. Otherwise, just add
886 // a new entry.
887 if (ExistingResult)
Chris Lattner9b7d99e2009-12-22 04:25:02 +0000888 ExistingResult->setResult(Dep);
Chris Lattner7e61daf2008-12-01 01:15:42 +0000889 else
Chris Lattner9b7d99e2009-12-22 04:25:02 +0000890 Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000891
Chris Lattner20597532008-11-30 01:18:27 +0000892 // If the block has a dependency (i.e. it isn't completely transparent to
Chris Lattner7e61daf2008-12-01 01:15:42 +0000893 // the value), remember the association!
894 if (!Dep.isNonLocal()) {
Chris Lattner20597532008-11-30 01:18:27 +0000895 // Keep the ReverseNonLocalDeps map up to date so we can efficiently
896 // update this when we remove instructions.
Chris Lattner7e61daf2008-12-01 01:15:42 +0000897 if (Instruction *Inst = Dep.getInst())
Chris Lattner254314e2008-12-09 19:38:05 +0000898 ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
Chris Lattner7e61daf2008-12-01 01:15:42 +0000899 } else {
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000900
Chris Lattner7e61daf2008-12-01 01:15:42 +0000901 // If the block *is* completely transparent to the call, we need to check
902 // the predecessors of this block. Add them to our worklist.
Daniel Berlinb4e7a4a2015-04-21 21:11:50 +0000903 for (BasicBlock *Pred : PredCache.get(DirtyBB))
904 DirtyBlocks.push_back(Pred);
Chris Lattner7e61daf2008-12-01 01:15:42 +0000905 }
Chris Lattner20597532008-11-30 01:18:27 +0000906 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000907
Chris Lattner7e61daf2008-12-01 01:15:42 +0000908 return Cache;
Chris Lattner20597532008-11-30 01:18:27 +0000909}
910
Chandler Carruth61440d22016-03-10 00:55:30 +0000911void MemoryDependenceResults::getNonLocalPointerDependency(
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000912 Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
Chandler Carruthac80dc72015-06-17 07:18:54 +0000913 const MemoryLocation Loc = MemoryLocation::get(QueryInst);
Philip Reames567feb92015-01-09 00:04:22 +0000914 bool isLoad = isa<LoadInst>(QueryInst);
915 BasicBlock *FromBB = QueryInst->getParent();
916 assert(FromBB);
Philip Reames33d7f9d2015-01-09 00:26:45 +0000917
918 assert(Loc.Ptr->getType()->isPointerTy() &&
919 "Can't get pointer deps of a non-pointer!");
920 Result.clear();
Piotr Padlewski95308832017-01-12 11:33:58 +0000921 {
922 // Check if there is a cached Def with invariant.group. FIXME: the cache
923 // might be invalid if the cached instruction is removed between the call
924 // to getPointerDependencyFrom and this function.
925 auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
926 if (NonLocalDefIt != NonLocalDefsCache.end()) {
927 Result.push_back(std::move(NonLocalDefIt->second));
928 NonLocalDefsCache.erase(NonLocalDefIt);
929 return;
930 }
931 }
Philip Reames33d7f9d2015-01-09 00:26:45 +0000932 // This routine does not expect to deal with volatile instructions.
933 // Doing so would require piping the QueryInst all the way through.
Philip Reames567feb92015-01-09 00:04:22 +0000934 // TODO: volatiles can't be elided, but they can be reordered with other
Philip Reames33d7f9d2015-01-09 00:26:45 +0000935 // non-volatile accesses.
Philip Reamesa7ad6a52015-01-26 18:54:27 +0000936
Philip Reames567feb92015-01-09 00:04:22 +0000937 // We currently give up on any instruction which is ordered, but we do handle
938 // atomic instructions which are unordered.
939 // TODO: Handle ordered instructions
940 auto isOrdered = [](Instruction *Inst) {
941 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
942 return !LI->isUnordered();
943 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
944 return !SI->isUnordered();
945 }
946 return false;
947 };
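// For illustration only (not from the original source): an unordered atomic
// load such as "load atomic i32, i32* %p unordered, align 4" still passes
// this filter, whereas an ordered access such as
// "load atomic i32, i32* %p acquire, align 4" is rejected as ordered below.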
Philip Reames33d7f9d2015-01-09 00:26:45 +0000948 if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000949 Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
Philip Reames33d7f9d2015-01-09 00:26:45 +0000950 const_cast<Value *>(Loc.Ptr)));
951 return;
952 }
Mehdi Aminia28d91d2015-03-10 02:37:25 +0000953 const DataLayout &DL = FromBB->getModule()->getDataLayout();
Daniel Jasperaec2fa32016-12-19 08:22:17 +0000954 PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000955
Chris Lattnerff9f3db2008-12-15 03:35:32 +0000956 // This is the set of blocks we've inspected, and the pointer we consider in
957 // each block. Because of critical edges, we currently bail out if querying
958 // a block with multiple different pointers. This can happen during PHI
959 // translation.
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000960 DenseMap<BasicBlock *, Value *> Visited;
Chandler Carruthb32febe2016-03-07 12:45:07 +0000961 if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
Chris Lattnerff9f3db2008-12-15 03:35:32 +0000962 Result, Visited, true))
963 return;
Chris Lattner7ed5ccc2008-12-15 04:58:29 +0000964 Result.clear();
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000965 Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
Dan Gohman23483932010-09-22 21:41:02 +0000966 const_cast<Value *>(Loc.Ptr)));
Chris Lattner7564a3b2008-12-07 02:56:57 +0000967}
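// Illustrative sketch of a hypothetical caller (not part of this file): a
// client pass such as GVN typically drives this interface roughly as follows,
// assuming MD is a MemoryDependenceResults reference and LI is a LoadInst*:
//
//   SmallVector<NonLocalDepResult, 64> Deps;
//   MD.getNonLocalPointerDependency(LI, Deps);
//   for (const NonLocalDepResult &Dep : Deps)
//     if (Dep.getResult().isDef())
//       ; // candidate block/address for PRE: Dep.getBB(), Dep.getAddress()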
968
Chandler Carruth40e21f22016-03-07 12:30:06 +0000969/// Compute the memdep value for BB with Pointer/PointeeSize using either
970/// cached information in Cache or by doing a lookup (which may use dirty cache
971/// info if available).
972///
973/// If we do a lookup, add the result to the cache.
Chandler Carruth61440d22016-03-10 00:55:30 +0000974MemDepResult MemoryDependenceResults::GetNonLocalInfoForBlock(
Chandler Carruthac80dc72015-06-17 07:18:54 +0000975 Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
976 BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000977
Chris Lattnerf903fe12008-12-09 07:47:11 +0000978 // Do a binary search to see if we already have an entry for this block in
979 // the cache set. If so, find it.
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000980 NonLocalDepInfo::iterator Entry = std::upper_bound(
981 Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
982 if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
Chris Lattnerf903fe12008-12-09 07:47:11 +0000983 --Entry;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000984
Craig Topper9f008862014-04-15 04:59:12 +0000985 NonLocalDepEntry *ExistingResult = nullptr;
Chandler Carruth60fb1b42016-03-07 10:19:30 +0000986 if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
Chris Lattner0c315472009-12-09 07:08:01 +0000987 ExistingResult = &*Entry;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000988
Chris Lattnerf903fe12008-12-09 07:47:11 +0000989 // If we have a cached entry, and it is non-dirty, use it as the value for
990 // this dependency.
Chris Lattner0c315472009-12-09 07:08:01 +0000991 if (ExistingResult && !ExistingResult->getResult().isDirty()) {
Chris Lattnerf903fe12008-12-09 07:47:11 +0000992 ++NumCacheNonLocalPtr;
Chris Lattner0c315472009-12-09 07:08:01 +0000993 return ExistingResult->getResult();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +0000994 }
995
Chris Lattnerf903fe12008-12-09 07:47:11 +0000996 // Otherwise, we have to scan for the value. If we have a dirty cache
997 // entry, start scanning from its position, otherwise we scan from the end
998 // of the block.
999 BasicBlock::iterator ScanPos = BB->end();
Chris Lattner0c315472009-12-09 07:08:01 +00001000 if (ExistingResult && ExistingResult->getResult().getInst()) {
1001 assert(ExistingResult->getResult().getInst()->getParent() == BB &&
Chris Lattnerf903fe12008-12-09 07:47:11 +00001002 "Instruction invalidated?");
1003 ++NumCacheDirtyNonLocalPtr;
Duncan P. N. Exon Smith5a82c912015-10-10 00:53:03 +00001004 ScanPos = ExistingResult->getResult().getInst()->getIterator();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001005
Chris Lattnerf903fe12008-12-09 07:47:11 +00001006 // Eliminating the dirty entry from 'Cache', so update the reverse info.
Dan Gohman23483932010-09-22 21:41:02 +00001007 ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
Duncan P. N. Exon Smith5a82c912015-10-10 00:53:03 +00001008 RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
Chris Lattnerf903fe12008-12-09 07:47:11 +00001009 } else {
1010 ++NumUncacheNonLocalPtr;
1011 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001012
Chris Lattnerf903fe12008-12-09 07:47:11 +00001013 // Scan the block for the dependency.
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001014 MemDepResult Dep =
1015 getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001016
Chris Lattnerf903fe12008-12-09 07:47:11 +00001017 // If we had a dirty entry for the block, update it. Otherwise, just add
1018 // a new entry.
1019 if (ExistingResult)
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001020 ExistingResult->setResult(Dep);
Chris Lattnerf903fe12008-12-09 07:47:11 +00001021 else
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001022 Cache->push_back(NonLocalDepEntry(BB, Dep));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001023
Chris Lattnerf903fe12008-12-09 07:47:11 +00001024 // If the block has a dependency (i.e. it isn't completely transparent to
1025 // the value), remember the reverse association because we just added it
1026 // to Cache!
Eli Friedmanc1702c82011-10-13 22:14:57 +00001027 if (!Dep.isDef() && !Dep.isClobber())
Chris Lattnerf903fe12008-12-09 07:47:11 +00001028 return Dep;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001029
Chris Lattnerf903fe12008-12-09 07:47:11 +00001030 // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
1031 // update MemDep when we remove instructions.
1032 Instruction *Inst = Dep.getInst();
1033 assert(Inst && "Didn't depend on anything?");
Dan Gohman23483932010-09-22 21:41:02 +00001034 ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
Chris Lattner8eda11b2009-03-29 00:24:04 +00001035 ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
Chris Lattnerf903fe12008-12-09 07:47:11 +00001036 return Dep;
1037}
1038
Chandler Carruth40e21f22016-03-07 12:30:06 +00001039/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
1040/// array that are already properly ordered.
1041///
1042/// This is optimized for the case when only a few entries are added.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001043static void
Chandler Carruth61440d22016-03-10 00:55:30 +00001044SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
Chris Lattner370aada2009-07-13 17:20:05 +00001045 unsigned NumSortedEntries) {
1046 switch (Cache.size() - NumSortedEntries) {
1047 case 0:
1048 // done, no new entries.
1049 break;
1050 case 2: {
1051 // Two new entries, insert the last one into place.
Chris Lattner0c315472009-12-09 07:08:01 +00001052 NonLocalDepEntry Val = Cache.back();
Chris Lattner370aada2009-07-13 17:20:05 +00001053 Cache.pop_back();
Chandler Carruth61440d22016-03-10 00:55:30 +00001054 MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001055 std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
Chris Lattner370aada2009-07-13 17:20:05 +00001056 Cache.insert(Entry, Val);
Justin Bognercd1d5aa2016-08-17 20:30:52 +00001057 LLVM_FALLTHROUGH;
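// Intentional fall-through: the remaining new entry is inserted by the
// case-1 code below.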
Chris Lattner370aada2009-07-13 17:20:05 +00001058 }
1059 case 1:
1060 // One new entry, just insert the new value at the appropriate position.
1061 if (Cache.size() != 1) {
Chris Lattner0c315472009-12-09 07:08:01 +00001062 NonLocalDepEntry Val = Cache.back();
Chris Lattner370aada2009-07-13 17:20:05 +00001063 Cache.pop_back();
Chandler Carruth61440d22016-03-10 00:55:30 +00001064 MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001065 std::upper_bound(Cache.begin(), Cache.end(), Val);
Chris Lattner370aada2009-07-13 17:20:05 +00001066 Cache.insert(Entry, Val);
1067 }
1068 break;
1069 default:
1070 // Added many values, do a full scale sort.
1071 std::sort(Cache.begin(), Cache.end());
1072 break;
1073 }
1074}
1075
Chandler Carruth40e21f22016-03-07 12:30:06 +00001076/// Perform a dependency query based on pointer/pointeesize starting at the end
1077/// of StartBB.
1078///
1079/// Add any clobber/def results to the results vector and keep track of which
1080/// blocks are visited in 'Visited'.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001081///
1082/// This has special behavior for the first block queries (when SkipFirstBlock
1083/// is true). In this special case, it ignores the contents of the specified
1084/// block and starts returning dependence info for its predecessors.
1085///
Chandler Carruthb32febe2016-03-07 12:45:07 +00001086/// This function returns true on success, or false to indicate that it could
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001087/// not compute dependence information for some reason. This should be treated
1088/// as a clobber dependence on the first instruction in the predecessor block.
Chandler Carruth61440d22016-03-10 00:55:30 +00001089bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
Chandler Carruthac80dc72015-06-17 07:18:54 +00001090 Instruction *QueryInst, const PHITransAddr &Pointer,
1091 const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
1092 SmallVectorImpl<NonLocalDepResult> &Result,
1093 DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001094 // Look up the cached info for Pointer.
Chris Lattner972e6d82009-12-09 01:59:31 +00001095 ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
Dan Gohman23483932010-09-22 21:41:02 +00001096
Dan Gohman0a6021a2010-11-10 20:37:15 +00001097 // Set up a temporary NLPI value. If the map doesn't yet have an entry for
1098 // CacheKey, this value will be inserted as the associated value. Otherwise,
1099 // it'll be ignored, and we'll have to check to see if the cached size and
Hal Finkelcc39b672014-07-24 12:16:19 +00001100 // aa tags are consistent with the current query.
Dan Gohman0a6021a2010-11-10 20:37:15 +00001101 NonLocalPointerInfo InitialNLPI;
1102 InitialNLPI.Size = Loc.Size;
Hal Finkelcc39b672014-07-24 12:16:19 +00001103 InitialNLPI.AATags = Loc.AATags;
Dan Gohman0a6021a2010-11-10 20:37:15 +00001104
1105 // Get the NLPI for CacheKey, inserting one into the map if it doesn't
1106 // already have one.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001107 std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001108 NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
Dan Gohman0a6021a2010-11-10 20:37:15 +00001109 NonLocalPointerInfo *CacheInfo = &Pair.first->second;
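// Note: Pair.second is true exactly when the insert above created a new
// entry, in which case InitialNLPI is now the stored value; otherwise the
// pre-existing cached info is left untouched and reconciled with this query
// below.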
1110
Dan Gohman2e8ca442010-11-10 21:45:11 +00001111 // If we already have a cache entry for this CacheKey, we may need to do some
1112 // work to reconcile the cache entry and the current query.
Dan Gohman0a6021a2010-11-10 20:37:15 +00001113 if (!Pair.second) {
Reid Kleckner6d310012017-12-28 05:10:33 +00001114 if (CacheInfo->Size < Loc.Size) {
1115 // The query's Size is greater than the cached one. Throw out the
1116 // cached data and proceed with the query at the greater size.
Dan Gohman2e8ca442010-11-10 21:45:11 +00001117 CacheInfo->Pair = BBSkipFirstBlockPair();
1118 CacheInfo->Size = Loc.Size;
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001119 for (auto &Entry : CacheInfo->NonLocalDeps)
1120 if (Instruction *Inst = Entry.getResult().getInst())
Dan Gohman67919362010-11-10 22:35:02 +00001121 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
Dan Gohman2e8ca442010-11-10 21:45:11 +00001122 CacheInfo->NonLocalDeps.clear();
Reid Kleckner6d310012017-12-28 05:10:33 +00001123 } else if (CacheInfo->Size > Loc.Size) {
1124 // This query's Size is less than the cached one. Conservatively restart
1125 // the query using the greater size.
1126 return getNonLocalPointerDepFromBB(
1127 QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
1128 StartBB, Result, Visited, SkipFirstBlock);
Dan Gohman0a6021a2010-11-10 20:37:15 +00001129 }
1130
Hal Finkelcc39b672014-07-24 12:16:19 +00001131 // If the query's AATags are inconsistent with the cached one,
Dan Gohman2e8ca442010-11-10 21:45:11 +00001132 // conservatively throw out the cached data and restart the query with
1133 // no tag if needed.
Hal Finkelcc39b672014-07-24 12:16:19 +00001134 if (CacheInfo->AATags != Loc.AATags) {
1135 if (CacheInfo->AATags) {
Dan Gohman2e8ca442010-11-10 21:45:11 +00001136 CacheInfo->Pair = BBSkipFirstBlockPair();
Hal Finkelcc39b672014-07-24 12:16:19 +00001137 CacheInfo->AATags = AAMDNodes();
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001138 for (auto &Entry : CacheInfo->NonLocalDeps)
1139 if (Instruction *Inst = Entry.getResult().getInst())
Dan Gohman67919362010-11-10 22:35:02 +00001140 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
Dan Gohman2e8ca442010-11-10 21:45:11 +00001141 CacheInfo->NonLocalDeps.clear();
1142 }
Hal Finkelcc39b672014-07-24 12:16:19 +00001143 if (Loc.AATags)
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001144 return getNonLocalPointerDepFromBB(
1145 QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB, Result,
1146 Visited, SkipFirstBlock);
Dan Gohman0a6021a2010-11-10 20:37:15 +00001147 }
Dan Gohman23483932010-09-22 21:41:02 +00001148 }
1149
1150 NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
Chris Lattner5ed409e2008-12-08 07:31:50 +00001151
1152 // If we have valid cached information for exactly the block we are
1153 // investigating, just return it with no recomputation.
Dan Gohman23483932010-09-22 21:41:02 +00001154 if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
Chris Lattner8b4be372008-12-16 07:10:09 +00001155 // We have a fully cached result for this query, so we can just return the
1156 // cached results and populate the visited set. However, we have to verify
1157 // that we don't already have conflicting results for these blocks. Check
1158 // to ensure that if a block in the results set is in the visited set that
1159 // it was for the same pointer query.
1160 if (!Visited.empty()) {
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001161 for (auto &Entry : *Cache) {
1162 DenseMap<BasicBlock *, Value *>::iterator VI =
1163 Visited.find(Entry.getBB());
Chris Lattner972e6d82009-12-09 01:59:31 +00001164 if (VI == Visited.end() || VI->second == Pointer.getAddr())
1165 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001166
Chandler Carruthb32febe2016-03-07 12:45:07 +00001167 // We have a pointer mismatch in a block. Just return false, saying
Chris Lattner8b4be372008-12-16 07:10:09 +00001168 // that something was clobbered in this result. We could also do a
1169 // non-fully cached query, but there is little point in doing this.
Chandler Carruthb32febe2016-03-07 12:45:07 +00001170 return false;
Chris Lattner8b4be372008-12-16 07:10:09 +00001171 }
1172 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001173
Chris Lattner9b7d99e2009-12-22 04:25:02 +00001174 Value *Addr = Pointer.getAddr();
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001175 for (auto &Entry : *Cache) {
1176 Visited.insert(std::make_pair(Entry.getBB(), Addr));
1177 if (Entry.getResult().isNonLocal()) {
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001178 continue;
1179 }
1180
Chandler Carruthaef32bd2016-03-11 13:46:00 +00001181 if (DT.isReachableFromEntry(Entry.getBB())) {
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001182 Result.push_back(
1183 NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001184 }
Chris Lattner8b4be372008-12-16 07:10:09 +00001185 }
Chris Lattner5ed409e2008-12-08 07:31:50 +00001186 ++NumCacheCompleteNonLocalPtr;
Chandler Carruthb32febe2016-03-07 12:45:07 +00001187 return true;
Chris Lattner5ed409e2008-12-08 07:31:50 +00001188 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001189
Chris Lattner5ed409e2008-12-08 07:31:50 +00001190 // Otherwise, this is either a new block, a block whose cached pair is
1191 // invalid, or one that we're about to invalidate by putting more info into
1192 // it than its cached info covers. If the cache is empty, the result will be
1193 // valid complete cache info for this query; otherwise it won't be.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001194 if (Cache->empty())
Dan Gohman23483932010-09-22 21:41:02 +00001195 CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
Dan Gohmanc87c8432010-11-11 00:42:22 +00001196 else
Dan Gohman23483932010-09-22 21:41:02 +00001197 CacheInfo->Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001198
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001199 SmallVector<BasicBlock *, 32> Worklist;
Chris Lattner5ed409e2008-12-08 07:31:50 +00001200 Worklist.push_back(StartBB);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001201
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001202 // PredList used inside loop.
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001203 SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001204
Chris Lattnera28355d2008-12-07 08:50:20 +00001205 // Keep track of the entries that we know are sorted. Previously cached
1206 // entries will all be sorted. The entries we add we only sort on demand (we
1207 // don't insert every element into its sorted position). We know that we
1208 // won't get any reuse from currently inserted values, because we don't
1209 // revisit blocks after we insert info for them.
1210 unsigned NumSortedEntries = Cache->size();
Joerg Sonnenberger36894dc2016-02-20 11:24:44 +00001211 unsigned WorklistEntries = BlockNumberLimit;
1212 bool GotWorklistLimit = false;
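// WorklistEntries acts as a budget: each batch of newly discovered
// predecessor blocks is charged against it, and once it is exhausted we give
// up on the query via the PredTranslationFailure path below.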
Chris Lattnerf09619d2009-01-22 07:04:01 +00001213 DEBUG(AssertSorted(*Cache));
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001214
Chris Lattner2faa2c72008-12-07 02:15:47 +00001215 while (!Worklist.empty()) {
Chris Lattner7564a3b2008-12-07 02:56:57 +00001216 BasicBlock *BB = Worklist.pop_back_val();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001217
Bruno Cardoso Lopese3c513a2014-10-01 20:07:13 +00001218 // If we end up processing a very large number of blocks, the query becomes
1219 // too expensive and the results are likely not worth it, so give up.
1220 if (Result.size() > NumResultsLimit) {
1221 Worklist.clear();
1222 // Sort it now (if needed) so that recursive invocations of
1223 // getNonLocalPointerDepFromBB and other routines that could reuse the
1224 // cache value will only see properly sorted cache arrays.
1225 if (Cache && NumSortedEntries != Cache->size()) {
1226 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
Bruno Cardoso Lopese3c513a2014-10-01 20:07:13 +00001227 }
1228 // Since we bail out, the "Cache" set won't contain all of the
1229 // results for the query. This is ok (we can still use it to accelerate
1230 // specific block queries) but we can't do the fastpath "return all
1231 // results from the set". Clear out the indicator for this.
1232 CacheInfo->Pair = BBSkipFirstBlockPair();
Chandler Carruthb32febe2016-03-07 12:45:07 +00001233 return false;
Bruno Cardoso Lopese3c513a2014-10-01 20:07:13 +00001234 }
1235
Chris Lattner75510d82008-12-09 07:52:59 +00001236 // Skip the first block if we have it.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001237 if (!SkipFirstBlock) {
Chris Lattner75510d82008-12-09 07:52:59 +00001238 // Analyze the dependency of *Pointer in FromBB. See if we already have
1239 // been here.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001240 assert(Visited.count(BB) && "Should check 'visited' before adding to WL");
Chris Lattnera28355d2008-12-07 08:50:20 +00001241
Chris Lattner75510d82008-12-09 07:52:59 +00001242 // Get the dependency info for Pointer in BB. If we have cached
1243 // information, we will use it, otherwise we compute it.
Chris Lattnerf09619d2009-01-22 07:04:01 +00001244 DEBUG(AssertSorted(*Cache, NumSortedEntries));
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001245 MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
1246 Cache, NumSortedEntries);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001247
Chris Lattner75510d82008-12-09 07:52:59 +00001248 // If we got a Def or Clobber, add this to the list of results.
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001249 if (!Dep.isNonLocal()) {
Chandler Carruthaef32bd2016-03-11 13:46:00 +00001250 if (DT.isReachableFromEntry(BB)) {
Matt Arsenaultc23753a2013-05-06 02:07:24 +00001251 Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
1252 continue;
1253 }
Chris Lattner75510d82008-12-09 07:52:59 +00001254 }
Chris Lattner2faa2c72008-12-07 02:15:47 +00001255 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001256
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001257 // If 'Pointer' is an instruction defined in this block, then we need to do
1258 // phi translation to change it into a value live in the predecessor block.
Chris Lattner972e6d82009-12-09 01:59:31 +00001259 // If not, we just add the predecessors to the worklist and scan them with
1260 // the same Pointer.
1261 if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001262 SkipFirstBlock = false;
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001263 SmallVector<BasicBlock *, 16> NewBlocks;
Daniel Berlinb4e7a4a2015-04-21 21:11:50 +00001264 for (BasicBlock *Pred : PredCache.get(BB)) {
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001265 // Verify that we haven't looked at this block yet.
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001266 std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
1267 Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001268 if (InsertRes.second) {
1269 // First time we've looked at *PI.
Daniel Berlinb4e7a4a2015-04-21 21:11:50 +00001270 NewBlocks.push_back(Pred);
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001271 continue;
1272 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001273
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001274 // If we have seen this block before, but it was with a different
1275 // pointer then we have a phi translation failure and we have to treat
1276 // this as a clobber.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001277 if (InsertRes.first->second != Pointer.getAddr()) {
1278 // Make sure to clean up the Visited map before continuing on to
1279 // PredTranslationFailure.
1280 for (unsigned i = 0; i < NewBlocks.size(); i++)
1281 Visited.erase(NewBlocks[i]);
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001282 goto PredTranslationFailure;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001283 }
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001284 }
Joerg Sonnenberger36894dc2016-02-20 11:24:44 +00001285 if (NewBlocks.size() > WorklistEntries) {
1286 // Make sure to clean up the Visited map before continuing on to
1287 // PredTranslationFailure.
1288 for (unsigned i = 0; i < NewBlocks.size(); i++)
1289 Visited.erase(NewBlocks[i]);
1290 GotWorklistLimit = true;
1291 goto PredTranslationFailure;
1292 }
1293 WorklistEntries -= NewBlocks.size();
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001294 Worklist.append(NewBlocks.begin(), NewBlocks.end());
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001295 continue;
1296 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001297
Chris Lattner972e6d82009-12-09 01:59:31 +00001298 // We do need to do phi translation. If we know ahead of time that we can't
1299 // phi translate this value, don't even try.
1300 if (!Pointer.IsPotentiallyPHITranslatable())
1301 goto PredTranslationFailure;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001302
Chris Lattner2f0c1c42009-07-13 17:14:23 +00001303 // We may have added values to the cache list before this PHI translation.
1304 // If so, we haven't done anything to ensure that the cache remains sorted.
1305 // Sort it now (if needed) so that recursive invocations of
1306 // getNonLocalPointerDepFromBB and other routines that could reuse the cache
1307 // value will only see properly sorted cache arrays.
1308 if (Cache && NumSortedEntries != Cache->size()) {
Chris Lattner370aada2009-07-13 17:20:05 +00001309 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
Chris Lattner2f0c1c42009-07-13 17:14:23 +00001310 NumSortedEntries = Cache->size();
1311 }
Craig Topper9f008862014-04-15 04:59:12 +00001312 Cache = nullptr;
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001313
1314 PredList.clear();
Daniel Berlinb4e7a4a2015-04-21 21:11:50 +00001315 for (BasicBlock *Pred : PredCache.get(BB)) {
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001316 PredList.push_back(std::make_pair(Pred, Pointer));
1317
Chris Lattner972e6d82009-12-09 01:59:31 +00001318 // Get the PHI translated pointer in this predecessor. This can fail if
1319 // not translatable, in which case the getAddr() returns null.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001320 PHITransAddr &PredPointer = PredList.back().second;
Chandler Carruthaef32bd2016-03-11 13:46:00 +00001321 PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
Chris Lattner972e6d82009-12-09 01:59:31 +00001322 Value *PredPtrVal = PredPointer.getAddr();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001323
Chris Lattnerac323292009-11-27 08:37:22 +00001324 // Check to see if we have already visited this pred block with another
1325 // pointer. If so, we can't do this lookup. This failure can occur
1326 // with PHI translation when a critical edge exists and the PHI node in
1327 // the successor translates to a pointer value different than the
1328 // pointer the block was first analyzed with.
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001329 std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
1330 Visited.insert(std::make_pair(Pred, PredPtrVal));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001331
Chris Lattnerac323292009-11-27 08:37:22 +00001332 if (!InsertRes.second) {
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001333 // We found the pred; take it off the list of preds to visit.
1334 PredList.pop_back();
1335
Chris Lattnerac323292009-11-27 08:37:22 +00001336 // If the predecessor was visited with PredPtr, then we already did
1337 // the analysis and can ignore it.
Chris Lattner972e6d82009-12-09 01:59:31 +00001338 if (InsertRes.first->second == PredPtrVal)
Chris Lattnerac323292009-11-27 08:37:22 +00001339 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001340
Chris Lattnerac323292009-11-27 08:37:22 +00001341 // Otherwise, the block was previously analyzed with a different
1342 // pointer. We can't represent the result of this case, so we just
1343 // treat this as a phi translation failure.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001344
1345 // Make sure to clean up the Visited map before continuing on to
1346 // PredTranslationFailure.
Matt Arsenault2080ecd2013-03-29 18:48:42 +00001347 for (unsigned i = 0, n = PredList.size(); i < n; ++i)
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001348 Visited.erase(PredList[i].first);
1349
Chris Lattnerac323292009-11-27 08:37:22 +00001350 goto PredTranslationFailure;
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001351 }
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001352 }
1353
1354 // Actually process results here; this need to be a separate loop to avoid
1355 // calling getNonLocalPointerDepFromBB for blocks we don't want to return
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001356 // any results for. (getNonLocalPointerDepFromBB will modify our
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001357 // data structures in ways the code after the PredTranslationFailure label
1358 // doesn't expect.)
Matt Arsenault2080ecd2013-03-29 18:48:42 +00001359 for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001360 BasicBlock *Pred = PredList[i].first;
1361 PHITransAddr &PredPointer = PredList[i].second;
1362 Value *PredPtrVal = PredPointer.getAddr();
1363
1364 bool CanTranslate = true;
Chris Lattner2be52e72009-11-27 22:05:15 +00001365 // If PHI translation was unable to find an available pointer in this
1366 // predecessor, then we have to assume that the pointer is clobbered in
1367 // that predecessor. We can still do PRE of the load, which would insert
1368 // a computation of the pointer in this predecessor.
Craig Topper9f008862014-04-15 04:59:12 +00001369 if (!PredPtrVal)
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001370 CanTranslate = false;
1371
1372 // FIXME: it is entirely possible that PHI translating will end up with
1373 // the same value. Consider PHI translating something like:
1374 // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
1375 // to recurse here, pedantically speaking.
1376
1377 // If getNonLocalPointerDepFromBB fails here, that means the cached
1378 // result conflicted with the Visited list; we have to conservatively
Eli Friedman7d58bc72011-06-15 00:47:34 +00001379 // assume it is unknown, but this also does not block PRE of the load.
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001380 if (!CanTranslate ||
Chandler Carruthb32febe2016-03-07 12:45:07 +00001381 !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001382 Loc.getWithNewPtr(PredPtrVal), isLoad,
1383 Pred, Result, Visited)) {
Chris Lattner9c2053b2009-12-01 07:33:32 +00001384 // Add the entry to the Result list.
Eli Friedman7d58bc72011-06-15 00:47:34 +00001385 NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
Chris Lattner9c2053b2009-12-01 07:33:32 +00001386 Result.push_back(Entry);
1387
Chris Lattner25bf6f82009-12-19 21:29:22 +00001388 // Since we had a phi translation failure, the cache for CacheKey won't
1389 // include all of the entries that we need to immediately satisfy future
1390 // queries. Mark this in NonLocalPointerDeps by setting the
1391 // BBSkipFirstBlockPair pointer to null. This requires reuse of the
1392 // cached value to do more work but not miss the phi trans failure.
Dan Gohman23483932010-09-22 21:41:02 +00001393 NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
1394 NLPI.Pair = BBSkipFirstBlockPair();
Chris Lattner2be52e72009-11-27 22:05:15 +00001395 continue;
Chris Lattner2be52e72009-11-27 22:05:15 +00001396 }
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001397 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001398
Chris Lattnerac323292009-11-27 08:37:22 +00001399 // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
1400 CacheInfo = &NonLocalPointerDeps[CacheKey];
Dan Gohman23483932010-09-22 21:41:02 +00001401 Cache = &CacheInfo->NonLocalDeps;
Chris Lattnerac323292009-11-27 08:37:22 +00001402 NumSortedEntries = Cache->size();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001403
Chris Lattnerac323292009-11-27 08:37:22 +00001404 // Since we did phi translation, the "Cache" set won't contain all of the
1405 // results for the query. This is ok (we can still use it to accelerate
1406 // specific block queries) but we can't do the fastpath "return all
1407 // results from the set". Clear out the indicator for this.
Dan Gohman23483932010-09-22 21:41:02 +00001408 CacheInfo->Pair = BBSkipFirstBlockPair();
Chris Lattnerac323292009-11-27 08:37:22 +00001409 SkipFirstBlock = false;
1410 continue;
Chris Lattnerc49f5ac2009-11-26 23:18:49 +00001411
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001412 PredTranslationFailure:
Eli Friedman4b6eeb92011-06-01 23:16:53 +00001413 // The following code is "failure"; we can't produce a sane translation
1414 // for the given block. It assumes that we haven't modified any of
1415 // our data structures while processing the current block.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001416
Craig Topper9f008862014-04-15 04:59:12 +00001417 if (!Cache) {
Chris Lattner3f4591c2009-01-23 07:12:16 +00001418 // Refresh the CacheInfo/Cache pointer if it got invalidated.
1419 CacheInfo = &NonLocalPointerDeps[CacheKey];
Dan Gohman23483932010-09-22 21:41:02 +00001420 Cache = &CacheInfo->NonLocalDeps;
Chris Lattner3f4591c2009-01-23 07:12:16 +00001421 NumSortedEntries = Cache->size();
Chris Lattner3f4591c2009-01-23 07:12:16 +00001422 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001423
Chris Lattner25bf6f82009-12-19 21:29:22 +00001424 // Since we failed phi translation, the "Cache" set won't contain all of the
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001425 // results for the query. This is ok (we can still use it to accelerate
1426 // specific block queries) but we can't do the fastpath "return all
Chris Lattner25bf6f82009-12-19 21:29:22 +00001427 // results from the set". Clear out the indicator for this.
Dan Gohman23483932010-09-22 21:41:02 +00001428 CacheInfo->Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001429
Eli Friedman7d58bc72011-06-15 00:47:34 +00001430 // If *nothing* works, mark the pointer as unknown.
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001431 //
1432 // If this is the magic first block, return this as a clobber of the whole
1433 // incoming value. Since we can't phi translate to one of the predecessors,
1434 // we have to bail out.
1435 if (SkipFirstBlock)
Chandler Carruthb32febe2016-03-07 12:45:07 +00001436 return false;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001437
Joerg Sonnenberger36894dc2016-02-20 11:24:44 +00001438 bool foundBlock = false;
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001439 for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
Joerg Sonnenberger36894dc2016-02-20 11:24:44 +00001440 if (I.getBB() != BB)
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001441 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001442
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001443 assert((GotWorklistLimit || I.getResult().isNonLocal() ||
Chandler Carruthaef32bd2016-03-11 13:46:00 +00001444 !DT.isReachableFromEntry(BB)) &&
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001445 "Should only be here with transparent block");
Joerg Sonnenberger36894dc2016-02-20 11:24:44 +00001446 foundBlock = true;
1447 I.setResult(MemDepResult::getUnknown());
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001448 Result.push_back(
1449 NonLocalDepResult(I.getBB(), I.getResult(), Pointer.getAddr()));
Chris Lattnerff9f3db2008-12-15 03:35:32 +00001450 break;
Chris Lattner7564a3b2008-12-07 02:56:57 +00001451 }
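// The (void) casts below exist to keep release (NDEBUG) builds, where the
// following assert compiles away, free of unused-variable warnings.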
Mehdi Amini89038a12016-04-02 05:34:19 +00001452 (void)foundBlock; (void)GotWorklistLimit;
Joerg Sonnenberger36894dc2016-02-20 11:24:44 +00001453 assert((foundBlock || GotWorklistLimit) && "Current block not in cache?");
Chris Lattner2faa2c72008-12-07 02:15:47 +00001454 }
Chris Lattner3f4591c2009-01-23 07:12:16 +00001455
Chris Lattnerf903fe12008-12-09 07:47:11 +00001456 // Okay, we're done now. If we added new values to the cache, re-sort it.
Chris Lattner370aada2009-07-13 17:20:05 +00001457 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
Chris Lattnerf09619d2009-01-22 07:04:01 +00001458 DEBUG(AssertSorted(*Cache));
Chandler Carruthb32febe2016-03-07 12:45:07 +00001459 return true;
Chris Lattnera28355d2008-12-07 08:50:20 +00001460}
1461
Chandler Carruth40e21f22016-03-07 12:30:06 +00001462/// If P exists in CachedNonLocalPointerInfo, remove it.
Chandler Carruth61440d22016-03-10 00:55:30 +00001463void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001464 ValueIsLoadPair P) {
1465 CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
1466 if (It == NonLocalPointerDeps.end())
1467 return;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001468
Chris Lattnera28355d2008-12-07 08:50:20 +00001469 // Remove all of the entries in the BB->val map. This involves removing
1470 // instructions from the reverse map.
Dan Gohman23483932010-09-22 21:41:02 +00001471 NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001472
Chris Lattnera28355d2008-12-07 08:50:20 +00001473 for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
Chris Lattner0c315472009-12-09 07:08:01 +00001474 Instruction *Target = PInfo[i].getResult().getInst();
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001475 if (!Target)
1476 continue; // Ignore non-local dep results.
Chris Lattner0c315472009-12-09 07:08:01 +00001477 assert(Target->getParent() == PInfo[i].getBB());
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001478
Chris Lattnera28355d2008-12-07 08:50:20 +00001479 // Eliminating the dirty entry from 'Cache', so update the reverse info.
Chris Lattner8eda11b2009-03-29 00:24:04 +00001480 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
Chris Lattnera28355d2008-12-07 08:50:20 +00001481 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001482
Chris Lattnera28355d2008-12-07 08:50:20 +00001483 // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
1484 NonLocalPointerDeps.erase(It);
Chris Lattner2faa2c72008-12-07 02:15:47 +00001485}
1486
Chandler Carruth61440d22016-03-10 00:55:30 +00001487void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
Chris Lattnerfa9f99a2008-12-09 22:06:23 +00001488 // If Ptr isn't really a pointer, just ignore it.
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001489 if (!Ptr->getType()->isPointerTy())
1490 return;
Chris Lattnerfa9f99a2008-12-09 22:06:23 +00001491 // Flush store info for the pointer.
1492 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
1493 // Flush load info for the pointer.
1494 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
1495}
1496
Chandler Carruth61440d22016-03-10 00:55:30 +00001497void MemoryDependenceResults::invalidateCachedPredecessors() {
Daniel Berlinb4e7a4a2015-04-21 21:11:50 +00001498 PredCache.clear();
Bob Wilson92cdb6e2010-02-16 19:51:59 +00001499}
1500
Chandler Carruth61440d22016-03-10 00:55:30 +00001501void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
Chris Lattnera25d39522008-11-28 22:04:47 +00001502 // Walk through the Non-local dependencies, removing this one as the value
1503 // for any cached queries.
Chris Lattner1b810bd2008-11-30 02:28:25 +00001504 NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
1505 if (NLDI != NonLocalDeps.end()) {
Chris Lattner7e61daf2008-12-01 01:15:42 +00001506 NonLocalDepInfo &BlockMap = NLDI->second.first;
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001507 for (auto &Entry : BlockMap)
1508 if (Instruction *Inst = Entry.getResult().getInst())
Chris Lattnerde4440c2008-12-07 18:39:13 +00001509 RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
Chris Lattner1b810bd2008-11-30 02:28:25 +00001510 NonLocalDeps.erase(NLDI);
1511 }
Owen Anderson086b2c42007-12-08 01:37:09 +00001512
Chris Lattnera25d39522008-11-28 22:04:47 +00001513 // If we have a cached local dependence query for this instruction, remove it.
Chris Lattnerde04e112008-11-29 01:43:36 +00001514 LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
1515 if (LocalDepEntry != LocalDeps.end()) {
Chris Lattnerada1f872008-11-30 01:09:30 +00001516 // Remove us from DepInst's reverse set now that the local dep info is gone.
Chris Lattnerde4440c2008-12-07 18:39:13 +00001517 if (Instruction *Inst = LocalDepEntry->second.getInst())
1518 RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
Chris Lattnerada1f872008-11-30 01:09:30 +00001519
Chris Lattner73c25452008-11-28 22:28:27 +00001520 // Remove this local dependency info.
Chris Lattnerde04e112008-11-29 01:43:36 +00001521 LocalDeps.erase(LocalDepEntry);
Chris Lattnera28355d2008-12-07 08:50:20 +00001522 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001523
Chris Lattnera28355d2008-12-07 08:50:20 +00001524 // If we have any cached pointer dependencies on this instruction, remove
1525 // them. If the instruction has non-pointer type, then it can't be a pointer
1526 // base.
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001527
Chris Lattnera28355d2008-12-07 08:50:20 +00001528 // Remove it from both the load info and the store info. The instruction
1529 // can't be in either of these maps if it is non-pointer.
Duncan Sands19d0b472010-02-16 11:11:14 +00001530 if (RemInst->getType()->isPointerTy()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001531 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
1532 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
1533 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001534
Chris Lattnerd3d91112008-11-28 22:51:08 +00001535 // Loop over all of the things that depend on the instruction we're removing.
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001536 SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;
Chris Lattner82b70342008-12-07 18:42:51 +00001537
1538 // If we find RemInst as a clobber or Def in any of the maps for other values,
1539 // we need to replace its entry with a dirty version of the instruction after
1540 // it. If RemInst is a terminator, we use a null dirty value.
1541 //
1542 // Using a dirty version of the instruction after RemInst saves having to scan
1543 // the entire block to get to this point.
1544 MemDepResult NewDirtyVal;
1545 if (!RemInst->isTerminator())
Duncan P. N. Exon Smith5a82c912015-10-10 00:53:03 +00001546 NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001547
Chris Lattner9f1988ab2008-11-29 09:20:15 +00001548 ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
1549 if (ReverseDepIt != ReverseLocalDeps.end()) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001550 // RemInst can't be the terminator if it has local stuff depending on it.
Craig Topper46276792014-08-24 23:23:06 +00001551 assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
Chris Lattnerada1f872008-11-30 01:09:30 +00001552 "Nothing can locally depend on a terminator");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001553
Craig Topper46276792014-08-24 23:23:06 +00001554 for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
Chris Lattner1b810bd2008-11-30 02:28:25 +00001555 assert(InstDependingOnRemInst != RemInst &&
1556 "Already removed our local dep info");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001557
Chris Lattner82b70342008-12-07 18:42:51 +00001558 LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001559
Chris Lattnerada1f872008-11-30 01:09:30 +00001560 // Make sure to remember that new things depend on NewDepInst.
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001561 assert(NewDirtyVal.getInst() &&
1562 "There is no way something else can have "
Chris Lattner82b70342008-12-07 18:42:51 +00001563 "a local dep on this if it is a terminator!");
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001564 ReverseDepsToAdd.push_back(
1565 std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
Chris Lattnerd3d91112008-11-28 22:51:08 +00001566 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001567
Chris Lattner63bd5862008-11-29 23:30:39 +00001568 ReverseLocalDeps.erase(ReverseDepIt);
1569
1570 // Add new reverse deps after scanning the set, to avoid invalidating the
1571 // 'ReverseDeps' reference.
1572 while (!ReverseDepsToAdd.empty()) {
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001573 ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
1574 ReverseDepsToAdd.back().second);
Chris Lattner63bd5862008-11-29 23:30:39 +00001575 ReverseDepsToAdd.pop_back();
1576 }
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001577 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001578
Chris Lattner9f1988ab2008-11-29 09:20:15 +00001579 ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
1580 if (ReverseDepIt != ReverseNonLocalDeps.end()) {
Craig Topper46276792014-08-24 23:23:06 +00001581 for (Instruction *I : ReverseDepIt->second) {
1582 assert(I != RemInst && "Already removed NonLocalDep info for RemInst");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001583
Craig Topper46276792014-08-24 23:23:06 +00001584 PerInstNLInfo &INLD = NonLocalDeps[I];
Chris Lattner44104272008-11-30 02:52:26 +00001585 // The information is now dirty!
Chris Lattner7e61daf2008-12-01 01:15:42 +00001586 INLD.second = true;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001587
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001588 for (auto &Entry : INLD.first) {
1589 if (Entry.getResult().getInst() != RemInst)
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001590 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001591
Chris Lattner1b810bd2008-11-30 02:28:25 +00001592 // Convert to a dirty entry for the subsequent instruction.
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001593 Entry.setResult(NewDirtyVal);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001594
Chris Lattner82b70342008-12-07 18:42:51 +00001595 if (Instruction *NextI = NewDirtyVal.getInst())
Craig Topper46276792014-08-24 23:23:06 +00001596 ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
Chris Lattner1b810bd2008-11-30 02:28:25 +00001597 }
1598 }
Chris Lattner63bd5862008-11-29 23:30:39 +00001599
1600 ReverseNonLocalDeps.erase(ReverseDepIt);
1601
Chris Lattnere7d7e132008-11-29 22:02:15 +00001602 // Add new reverse deps after scanning the set, to avoid invalidating it.
1603 while (!ReverseDepsToAdd.empty()) {
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001604 ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
1605 ReverseDepsToAdd.back().second);
Chris Lattnere7d7e132008-11-29 22:02:15 +00001606 ReverseDepsToAdd.pop_back();
1607 }
Owen Anderson5f208be2007-08-16 21:27:05 +00001608 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001609
Chris Lattnera28355d2008-12-07 08:50:20 +00001610 // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
1611 // value in the NonLocalPointerDeps info.
1612 ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001613 ReverseNonLocalPtrDeps.find(RemInst);
Chris Lattnera28355d2008-12-07 08:50:20 +00001614 if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001615 SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
1616 ReversePtrDepsToAdd;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001617
Craig Topper46276792014-08-24 23:23:06 +00001618 for (ValueIsLoadPair P : ReversePtrDepIt->second) {
Chris Lattnera28355d2008-12-07 08:50:20 +00001619 assert(P.getPointer() != RemInst &&
1620 "Already removed NonLocalPointerDeps info for RemInst");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001621
Dan Gohman23483932010-09-22 21:41:02 +00001622 NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001623
Chris Lattner5ed409e2008-12-08 07:31:50 +00001624 // The cache is not valid for any specific block anymore.
Dan Gohman23483932010-09-22 21:41:02 +00001625 NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001626
Chris Lattnera28355d2008-12-07 08:50:20 +00001627 // Update any entries for RemInst to use the instruction after it.
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001628 for (auto &Entry : NLPDI) {
1629 if (Entry.getResult().getInst() != RemInst)
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001630 continue;
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001631
Chris Lattnera28355d2008-12-07 08:50:20 +00001632 // Convert to a dirty entry for the subsequent instruction.
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001633 Entry.setResult(NewDirtyVal);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001634
Chris Lattnera28355d2008-12-07 08:50:20 +00001635 if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
1636 ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
1637 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001638
Chris Lattner3f4591c2009-01-23 07:12:16 +00001639 // Re-sort the NonLocalDepInfo. Changing the dirty entry to its
1640 // subsequent value may invalidate the sortedness.
1641 std::sort(NLPDI.begin(), NLPDI.end());
Chris Lattnera28355d2008-12-07 08:50:20 +00001642 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001643
Chris Lattnera28355d2008-12-07 08:50:20 +00001644 ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001645
Chris Lattnera28355d2008-12-07 08:50:20 +00001646 while (!ReversePtrDepsToAdd.empty()) {
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001647 ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
1648 ReversePtrDepsToAdd.back().second);
Chris Lattnera28355d2008-12-07 08:50:20 +00001649 ReversePtrDepsToAdd.pop_back();
1650 }
1651 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001652
Chris Lattner1b810bd2008-11-30 02:28:25 +00001653 assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
Jakob Stoklund Olesen087f2072011-01-11 04:05:39 +00001654 DEBUG(verifyRemoved(RemInst));
Owen Andersonc0daf5f2007-07-06 23:14:35 +00001655}
Chandler Carruth40e21f22016-03-07 12:30:06 +00001656
1657/// Verify that the specified instruction does not occur in our internal data
1658/// structures.
1659///
1660/// This function verifies by asserting in debug builds.
Chandler Carruth61440d22016-03-10 00:55:30 +00001661void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
Craig Topper46276792014-08-24 23:23:06 +00001662#ifndef NDEBUG
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001663 for (const auto &DepKV : LocalDeps) {
1664 assert(DepKV.first != D && "Inst occurs in data structures");
1665 assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001666 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001667
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001668 for (const auto &DepKV : NonLocalPointerDeps) {
1669 assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
1670 for (const auto &Entry : DepKV.second.NonLocalDeps)
1671 assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
Chris Lattnera28355d2008-12-07 08:50:20 +00001672 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001673
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001674 for (const auto &DepKV : NonLocalDeps) {
1675 assert(DepKV.first != D && "Inst occurs in data structures");
1676 const PerInstNLInfo &INLD = DepKV.second;
1677 for (const auto &Entry : INLD.first)
1678 assert(Entry.getResult().getInst() != D &&
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001679 "Inst occurs in data structures");
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001680 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001681
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001682 for (const auto &DepKV : ReverseLocalDeps) {
1683 assert(DepKV.first != D && "Inst occurs in data structures");
1684 for (Instruction *Inst : DepKV.second)
Craig Topper46276792014-08-24 23:23:06 +00001685 assert(Inst != D && "Inst occurs in data structures");
Chris Lattner1b810bd2008-11-30 02:28:25 +00001686 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001687
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001688 for (const auto &DepKV : ReverseNonLocalDeps) {
1689 assert(DepKV.first != D && "Inst occurs in data structures");
1690 for (Instruction *Inst : DepKV.second)
Craig Topper46276792014-08-24 23:23:06 +00001691 assert(Inst != D && "Inst occurs in data structures");
Chris Lattner1b810bd2008-11-30 02:28:25 +00001692 }
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001693
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001694 for (const auto &DepKV : ReverseNonLocalPtrDeps) {
1695 assert(DepKV.first != D && "Inst occurs in rev NLPD map");
Jakub Staszakb0a7eed2013-03-20 21:47:51 +00001696
Chandler Carruthaf8321e2016-03-07 15:12:57 +00001697 for (ValueIsLoadPair P : DepKV.second)
Chandler Carruth60fb1b42016-03-07 10:19:30 +00001698 assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
Chris Lattnera28355d2008-12-07 08:50:20 +00001699 "Inst occurs in ReverseNonLocalPtrDeps map");
1700 }
Craig Topper46276792014-08-24 23:23:06 +00001701#endif
Chris Lattnerb8ec75b2008-11-29 21:25:10 +00001702}
Chandler Carruth61440d22016-03-10 00:55:30 +00001703
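// Unique key identifying this analysis to the new pass manager; the analysis
// manager keys its cache of MemoryDependenceResults off the address of this
// object.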
Chandler Carruthdab4eae2016-11-23 17:53:26 +00001704AnalysisKey MemoryDependenceAnalysis::Key;
Chandler Carruthb4faf132016-03-11 10:22:49 +00001705
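/// Construct the per-function MemoryDependenceResults, pulling the alias
/// analysis, assumption cache, target library info and dominator tree it
/// builds on from the analysis manager.
///
/// A new-pass-manager client would typically query the result along these
/// lines (a sketch only; SomeMemInst stands in for a load or store of
/// interest):
///
///   auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
///   MemDepResult Dep = MD.getDependency(SomeMemInst);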
Chandler Carruth61440d22016-03-10 00:55:30 +00001706MemoryDependenceResults
Sean Silva36e0d012016-08-09 00:28:15 +00001707MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
Chandler Carruthb47f8012016-03-11 11:05:24 +00001708 auto &AA = AM.getResult<AAManager>(F);
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001709 auto &AC = AM.getResult<AssumptionAnalysis>(F);
Chandler Carruthb47f8012016-03-11 11:05:24 +00001710 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
Chandler Carruthaef32bd2016-03-11 13:46:00 +00001711 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001712 return MemoryDependenceResults(AA, AC, TLI, DT);
Chandler Carruth61440d22016-03-10 00:55:30 +00001713}
1714
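// Legacy pass manager registration. ID is the opaque address used to identify
// the pass; the INITIALIZE_PASS_* macros register it under the command-line
// name "memdep" and declare the analyses it requires so that the legacy
// PassManager schedules them first.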
1715char MemoryDependenceWrapperPass::ID = 0;
1716
1717INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
1718 "Memory Dependence Analysis", false, true)
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001719INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
Chandler Carruth61440d22016-03-10 00:55:30 +00001720INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
Chandler Carruthaef32bd2016-03-11 13:46:00 +00001721INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
Chandler Carruth61440d22016-03-10 00:55:30 +00001722INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1723INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
1724 "Memory Dependence Analysis", false, true)
1725
1726MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
1727 initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
1728}
Eugene Zelenko1804a772016-08-25 00:45:04 +00001729
Eugene Zelenkobb1b2d02017-08-16 22:07:40 +00001730MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() = default;
Chandler Carruth61440d22016-03-10 00:55:30 +00001731
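// Drop all cached dependence information when the legacy pass manager asks us
// to release memory between runs.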
1732void MemoryDependenceWrapperPass::releaseMemory() {
1733 MemDep.reset();
1734}
1735
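// This pass never mutates the IR. AA and TLI are required transitively because
// the cached MemoryDependenceResults keeps querying them lazily after
// runOnFunction returns.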
1736void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1737 AU.setPreservesAll();
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001738 AU.addRequired<AssumptionCacheTracker>();
Chandler Carruthaef32bd2016-03-11 13:46:00 +00001739 AU.addRequired<DominatorTreeWrapperPass>();
Chandler Carruth61440d22016-03-10 00:55:30 +00001740 AU.addRequiredTransitive<AAResultsWrapperPass>();
1741 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1742}
1743
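// New-pass-manager invalidation hook: the cached result must be discarded if
// this analysis itself, or any analysis it holds references to, has been
// invalidated.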
Chandler Carruthe14524c2016-12-27 19:33:04 +00001744bool MemoryDependenceResults::invalidate(Function &F, const PreservedAnalyses &PA,
1745 FunctionAnalysisManager::Invalidator &Inv) {
1746 // Check whether our analysis is preserved.
1747 auto PAC = PA.getChecker<MemoryDependenceAnalysis>();
1748 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
1749 // If not, give up now.
1750 return true;
1751
1752 // Check whether the analyses we depend on became invalid for any reason.
1753 if (Inv.invalidate<AAManager>(F, PA) ||
1754 Inv.invalidate<AssumptionAnalysis>(F, PA) ||
1755 Inv.invalidate<DominatorTreeAnalysis>(F, PA))
1756 return true;
1757
1758 // Otherwise this analysis result remains valid.
1759 return false;
1760}
1761
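// Upper bound on the number of instructions scanned in a single block per
// dependence query; tunable via the -memdep-block-scan-limit option.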
Bob Haarman3db17642016-08-26 16:34:27 +00001762unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
1763 return BlockScanLimit;
1764}
1765
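/// Gather the analyses the wrapper depends on and (re)build the
/// MemoryDependenceResults for this function. Returns false because the IR is
/// never modified.
///
/// A legacy-pass-manager client would reach the results roughly like this
/// (a sketch only; ClientPass is a placeholder):
///
///   void ClientPass::getAnalysisUsage(AnalysisUsage &AU) const {
///     AU.addRequired<MemoryDependenceWrapperPass>();
///   }
///   ...
///   MemoryDependenceResults &MD =
///       getAnalysis<MemoryDependenceWrapperPass>().getMemDep();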
Chandler Carruth61440d22016-03-10 00:55:30 +00001766bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
1767 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001768 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
Chandler Carruth61440d22016-03-10 00:55:30 +00001769 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
Chandler Carruthaef32bd2016-03-11 13:46:00 +00001770 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001771 MemDep.emplace(AA, AC, TLI, DT);
Chandler Carruth61440d22016-03-10 00:55:30 +00001772 return false;
1773}