//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));
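
// Both limits are ordinary cl::opt flags, so they can be tuned from the
// command line when running this analysis through 'opt', e.g. (illustrative
// invocation; any pipeline that exercises memdep applies):
//   opt -memdep-block-scan-limit=200 -memdep-block-number-limit=2000 ...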

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;

/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}
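
// For example (hypothetical map contents): if ReverseMap holds
//   Inst -> {A, B}
// then RemoveFromReverseMap(ReverseMap, Inst, A) shrinks the set to {B}, and
// a subsequent removal of B erases Inst's entry from the map entirely.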

/// If the given instruction references a specific memory location, fill in Loc
/// with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return MRI_Ref;
    }
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return MRI_ModRef;
    }
    Loc = MemoryLocation();
    return MRI_ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return MRI_Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return MRI_ModRef;
    }
    Loc = MemoryLocation();
    return MRI_ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return MRI_ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // Calls to free() deallocate the entire structure.
    Loc = MemoryLocation(CI->getArgOperand(0));
    return MRI_Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    AAMDNodes AAInfo;

    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      II->getAAMetadata(AAInfo);
      Loc = MemoryLocation(
          II->getArgOperand(1),
          cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return MRI_Mod;
    case Intrinsic::invariant_end:
      II->getAAMetadata(AAInfo);
      Loc = MemoryLocation(
          II->getArgOperand(2),
          cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return MRI_Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return MRI_ModRef;
  if (Inst->mayReadFromMemory())
    return MRI_Ref;
  return MRI_NoModRef;
}
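
// As an illustration: for an unordered 'load i32, i32* %p', GetLocation fills
// Loc with %p and the 4-byte size of i32 and returns MRI_Ref; for an
// unordered 'store i32 0, i32* %p' it fills in the same location and returns
// MRI_Mod.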

/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallSiteDependencyFrom(
    CallSite CS, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = &*--ScanIt;

    // If this inst is a memory op, get the pointer it accesses.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA.getModRefInfo(CS, Loc) != MRI_NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto InstCS = CallSite(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst))
        continue;
      // If these two calls do not interfere, look past it.
      switch (AA.getModRefInfo(CS, InstCS)) {
      case MRI_NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & MRI_Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        continue;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (MR != MRI_NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
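
// Illustrative example (hypothetical IR): given two identical read-only calls
//   %a = call i32 @pure(i32* %p)   ; earlier call
//   %b = call i32 @pure(i32* %p)   ; query call site
// alias analysis reports MRI_NoModRef between them, so the scan returns the
// earlier call as a Def and a client such as GVN may drop the second call.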

/// Return true if LI is a load that would fully overlap MemLoc if done as
/// a wider legal integer load.
///
/// MemLocBase and MemLocOffs are lazily computed here the first time the
/// base/offset of MemLoc is needed.
static bool isLoadLoadClobberIfExtendedToFullWidth(const MemoryLocation &MemLoc,
                                                   const Value *&MemLocBase,
                                                   int64_t &MemLocOffs,
                                                   const LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (!MemLocBase)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);

  unsigned Size = MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
      MemLocBase, MemLocOffs, MemLoc.Size, LI);
  return Size != 0;
}

unsigned MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
    const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
    const LoadInst *LI) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple())
    return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
    return 0;

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
      GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase)
    return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs)
    return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs + MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs + LoadAlign < MemLocEnd)
    return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits() / 8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);
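
  // For example, an i8 load starts with NewLoadByteSize = 1, which
  // NextPowerOf2 bumps to 2; the loop below then tries 2, 4, 8, ... bytes
  // until the widened load either covers MemLoc or exceeds the alignment or
  // the largest legal integer width.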

  while (true) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !DL.fitsInLegalInteger(NewLoadByteSize * 8))
      return 0;

    if (LIOffs + NewLoadByteSize > MemLocEnd &&
        LI->getParent()->getParent()->hasFnAttribute(
            Attribute::SanitizeAddress))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings.  So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs + NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}

static bool isVolatile(Instruction *Inst) {
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return LI->isVolatile();
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return SI->isVolatile();
  if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return AI->isVolatile();
  return false;
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst) {
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      MemDepResult invariantGroupDependency =
          getInvariantGroupPointerDependency(LI, BB);

      if (invariantGroupDependency.isDef())
        return invariantGroupDependency;
    }
  }
  return getSimplePointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst);
}

MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                            BasicBlock *BB) {
  Value *LoadOperand = LI->getPointerOperand();
  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  auto *InvariantGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group);
  if (!InvariantGroupMD)
    return MemDepResult::getUnknown();

  MemDepResult Result = MemDepResult::getUnknown();
  SmallSet<Value *, 14> Seen;
  // Queue to process all pointers that are equivalent to the load operand.
  SmallVector<Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);
  while (!LoadOperandsQueue.empty()) {
    Value *Ptr = LoadOperandsQueue.pop_back_val();
    if (isa<GlobalValue>(Ptr))
      continue;

    if (auto *BCI = dyn_cast<BitCastInst>(Ptr)) {
      if (Seen.insert(BCI->getOperand(0)).second)
        LoadOperandsQueue.push_back(BCI->getOperand(0));
    }

    for (Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT.dominates(U, LI))
        continue;

      if (auto *BCI = dyn_cast<BitCastInst>(U)) {
        if (Seen.insert(BCI).second)
          LoadOperandsQueue.push_back(BCI);
        continue;
      }
      // If we hit a load/store with the same invariant.group metadata (and the
      // same pointer operand), we can assume that the value pointed to by the
      // pointer operand didn't change.
      if ((isa<LoadInst>(U) || isa<StoreInst>(U)) && U->getParent() == BB &&
          U->getMetadata(LLVMContext::MD_invariant_group) == InvariantGroupMD)
        return MemDepResult::getDef(U);
    }
  }
  return Result;
}
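
// Illustrative IR for the invariant.group walk (hypothetical):
//   store i8 42, i8* %p, !invariant.group !0
//   ...
//   %v = load i8, i8* %p, !invariant.group !0
// The load resolves to a Def of the store, because both access the same
// pointer under the same invariant.group metadata within this block.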

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst) {
  const Value *MemLocBase = nullptr;
  int64_t MemLocOffset = 0;
  unsigned Limit = BlockScanLimit;
  bool isInvariantLoad = false;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it.  We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42.  A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimization of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write.  We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
      isInvariantLoad = true;
  }

  const DataLayout &DL = BB->getModule()->getDataLayout();

  // Create a numbered basic block to lazily compute and cache instruction
  // positions inside a BB.  This is used to provide fast queries for relative
  // position between two instructions in a BB and can be used by
  // AliasAnalysis::callCapturesBefore.
  OrderedBasicBlock OBB(BB);
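
  // For instance, the first OBB.dominates(A, B) query walks and numbers the
  // block's instructions once; later queries between already-numbered
  // instructions are answered from the cached numbering without rescanning.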

  // Return "true" if and only if the instruction I is either a non-simple
  // load or a non-simple store.
  auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(I))
      return !LI->isSimple();
    if (auto *SI = dyn_cast<StoreInst>(I))
      return !SI->isSimple();
    return false;
  };

  // Return "true" if I is not a load and not a store, but it does access
  // memory.
  auto isOtherMemAccess = [](Instruction *I) -> bool {
    return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that
    // it does not alias with when this atomic load indicates that another
    // thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // be safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (isVolatile(QueryInst))
          // Ordering required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA.alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1).  If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
            if (LI->getAlignment() * 8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI))
              return MemDepResult::getClobber(Inst);
          }
          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Otherwise, arbitrary may-alias loads don't impose a dependence on
        // each other, so keep scanning.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA.pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);
        if (SI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(SI);
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations; normal accesses can, for example, be
      // reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA.getModRefInfo(SI, MemLoc) == MRI_NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA.alias(StoreLoc, MemLoc);

      if (R == NoAlias)
        continue;
      if (R == MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.  Note that we can bypass the allocation itself when
    // looking for a clobber in many cases; that's an alias property and is
    // handled by BasicAA.
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
      if (AccessPtr == Inst || AA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence.  As a result, we look past it when finding a dependency for
    // loads.  DSE uses this to find preceding stores to delete and thus we
    // can't bypass the fence if the query instruction is a store.
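    // For example (illustrative):
    //   store i32 1, i32* %a
    //   fence release
    //   %v = load i32, i32* %b
    // A load query for %b may scan past the release fence, while a store
    // query gets no such exemption and is handled conservatively below.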
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (MR == MRI_ModRef)
      MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB);
    switch (MR) {
    case MRI_NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case MRI_Mod:
      return MemDepResult::getClobber(Inst);
    case MRI_Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it. Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & MRI_Mod);
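      // Note: lifetime.start does not read the old contents of its object,
      // so treating the query as a load is safe here and lets the scan look
      // past accesses (e.g. read-only calls) that would block a store query.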
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(
          MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA.onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(
          QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result! If it points at an instruction, record a reverse
  // mapping so the cache entry is invalidated if that instruction is removed.
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}

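// Example (sketch, not part of the original file; MD and LI are assumed to be
// a MemoryDependenceResults* and a LoadInst* respectively):
//   MemDepResult Res = MD->getDependency(LI);
//   if (Res.isDef()) {
//     // Res.getInst() is the local def (e.g. a store) LI depends on.
//   } else if (Res.isNonLocal()) {
//     // The dependency lies outside LI's block; use the non-local queries.
//   }
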
#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed. In the cached case,
  // this can happen due to instructions being deleted etc. In the uncached
  // case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry. If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock *Pred : PredCache.get(QueryBB))
      DirtyBlocks.push_back(Pred);
    ++NumUncacheNonLocal;
  }

  // If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set. If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
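    // NonLocalDepEntry orders by its BasicBlock pointer, so upper_bound on a
    // key entry for DirtyBB finds where DirtyBB's entry would live in the
    // sorted prefix of the cache.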
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep =
          getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found. If this is the entry block of the function, the
      // result is unknown within the function; otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it. Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {
      // If the block *is* completely transparent to the load, we need to
      // check the predecessors of this block. Add them to our worklist.
      for (BasicBlock *Pred : PredCache.get(DirtyBB))
        DirtyBlocks.push_back(Pred);
    }
  }

  return Cache;
}

void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.

  // We currently give up on any instruction which is ordered, but we do handle
  // atomic instructions which are unordered.
  // TODO: Handle ordered instructions
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      return !LI->isUnordered();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      return !SI->isUnordered();
    }
    return false;
  };
  if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getModule()->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block. Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers. This can happen during PHI
  // translation.
  DenseMap<BasicBlock *, Value *> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}
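
// Example (sketch; MD and LI as above): a client such as GVN walks the
// returned vector to find defs/clobbers on each incoming path:
//   SmallVector<NonLocalDepResult, 64> Deps;
//   MD->getNonLocalPointerDependency(LI, Deps);
//   for (const NonLocalDepResult &D : Deps)
//     if (D.getResult().isDef())
//       ...; // a defining access for LI's location reached via D.getBB()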

/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty cache
/// info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceResults::GetNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
  // Do a binary search to see if we already have an entry for this block in
  // the cache set. If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value. If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep =
      getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);

  // If we had a dirty entry for the block, update it. Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
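  // (A non-def, non-clobber result means the block was transparent, so there
  // is no instruction to record a reverse mapping for.)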
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
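/// With one or two new entries this costs a binary search plus a vector
/// insertion each, rather than re-sorting the whole array.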
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place, then fall through to
    // insert the remaining entry.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
    LLVM_FALLTHROUGH;
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
          std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}

/// Perform a dependency query based on pointer/pointeesize starting at the end
/// of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // AA tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size < Loc.Size) {
      // The query's Size is greater than the cached one. Throw out the
      // cached data and proceed with the query at the greater size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (auto &Entry : CacheInfo->NonLocalDeps)
        if (Instruction *Inst = Entry.getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    } else if (CacheInfo->Size > Loc.Size) {
      // This query's Size is less than the cached one. Conservatively restart
      // the query using the greater size.
      return getNonLocalPointerDepFromBB(
          QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
          StartBB, Result, Visited, SkipFirstBlock);
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB,
            Result, Visited, SkipFirstBlock);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // If we have a fully cached result for this query, we can just return the
    // cached results and populate the visited set. However, we have to verify
    // that we don't already have conflicting results for these blocks. Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block. Just return false, saying
        // that something was clobbered in this result. We could also do a
        // non-fully cached query, but there is little point in doing this.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal()) {
        continue;
      }

      if (DT.isReachableFromEntry(Entry.getBB())) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise, this is either a new block, a block with an invalid cache
  // pointer, or one that we're about to invalidate by putting more info into
  // it than its valid cache info. If the cache is empty, the result will be
  // valid cache info; otherwise it isn't.
  if (Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();

  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted. Previously cached
  // entries will all be sorted. The entries we add we only sort on demand (we
  // don't insert every element into its sorted position). We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
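  // Cap how many blocks this search may add to the worklist; if the limit is
  // reached, we give up and treat it as a phi translation failure below.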
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // If we do process a large number of blocks, it becomes very expensive
    // and likely it isn't worth worrying about.
    if (Result.size() > NumResultsLimit) {
      Worklist.clear();
      // Sort it now (if needed) so that recursive invocations of
      // getNonLocalPointerDepFromBB and other routines that could reuse the
      // cache value will only see properly sorted cache arrays.
      if (Cache && NumSortedEntries != Cache->size()) {
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      }
      // Since we bail out, the "Cache" set won't contain all of the
      // results for the query. This is ok (we can still use it to accelerate
      // specific block queries) but we can't do the fastpath "return all
      // results from the set". Clear out the indicator for this.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in BB. See if we have already
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB. If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
                                                 Cache, NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock *, 16> NewBlocks;
      for (BasicBlock *Pred : PredCache.get(BB)) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at Pred.
          NewBlocks.push_back(Pred);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      if (NewBlocks.size() > WorklistEntries) {
        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < NewBlocks.size(); i++)
          Visited.erase(NewBlocks[i]);
        GotWorklistLimit = true;
        goto PredTranslationFailure;
      }
      WorklistEntries -= NewBlocks.size();
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation; if we know ahead of time that we
    // can't phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the cache
    // value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = nullptr;

    PredList.clear();
    for (BasicBlock *Pred : PredCache.get(BB)) {
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor. This can fail if
      // not translatable, in which case the getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer. If so, we can't do this lookup. This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer. We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0, n = PredList.size(); i < n; ++i)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to
    // avoid calling getNonLocalPointerDepFromBB for blocks we don't want to
    // return any results for.  (getNonLocalPointerDepFromBB will modify our
    // data structures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
    for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor.  We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
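      // Sketch of that PRE (hypothetical IR, names invented): a client like
      // GVN can materialize the address in the predecessor, e.g.
      //   %p.pre = getelementptr i8, i8* %base, i64 %off   ; inserted in Pred
      // and reload from it there, so a failed translation here only loses the
      // chance to reuse an existing value.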
      if (!PredPtrVal)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value.  Consider PHI translating something like:
      // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy
        // future queries.  Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null.  This makes later reuses of
        // the cached value do more work, but ensures they do not miss the phi
        // translation failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();
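    // (The recursive getNonLocalPointerDepFromBB calls above may have added
    // entries to NonLocalPointerDeps, and a DenseMap insertion can rehash and
    // move its entries, so the old references cannot be kept live across the
    // loop.)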

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block.  It assumes that we haven't modified any of
    // our data structures while processing the current block.

    if (!Cache) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of
    // the results for the query.  This is ok (we can still use it to
    // accelerate specific block queries) but we can't do the fastpath "return
    // all results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, we can't phi translate into any of
    // the predecessors, so bail out by reporting failure; the caller then
    // treats the original pointer as unknown for the entire query.
    if (SkipFirstBlock)
      return false;

    bool foundBlock = false;
    for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
      if (I.getBB() != BB)
        continue;

      assert((GotWorklistLimit || I.getResult().isNonLocal() ||
              !DT.isReachableFromEntry(BB)) &&
             "Should only be here with transparent block");
      foundBlock = true;
      I.setResult(MemDepResult::getUnknown());
      Result.push_back(
          NonLocalDepResult(I.getBB(), I.getResult(), Pointer.getAddr()));
      break;
    }
    (void)foundBlock; (void)GotWorklistLimit;
    assert((foundBlock || GotWorklistLimit) && "Current block not in cache?");
  }

  // Okay, we're done now.  If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return true;
}

/// If P exists in CachedNonLocalPointerInfo, remove it.
void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map.  This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // We're eliminating this entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy())
    return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}
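
// Illustrative use (hypothetical client code): a transform that rewrites a
// pointer, e.g. via replaceAllUsesWith, would typically call
//   MD.invalidateCachedPointerInfo(OldPtr);
//   MD.invalidateCachedPointerInfo(NewPtr);
// so that stale non-local results are not served for either value.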

void MemoryDependenceResults::invalidateCachedPredecessors() {
  PredCache.clear();
}

void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (auto &Entry : BlockMap)
      if (Instruction *Inst = Entry.getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove
  // it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is
    // gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a
  // pointer base.

  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }

  // Loop over all of the things that depend on the instruction we're
  // removing.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it.  If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
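  // E.g. (sketch): if a load's cached entry was "Def: RemInst" and RemInst is
  // a store being deleted, the entry becomes "dirty at the instruction after
  // RemInst", so a later query resumes its backwards scan there instead of at
  // the end of the block.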

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on the instruction in
      // NewDirtyVal.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[I];
      // The information is now dirty!
      INLD.second = true;

      for (auto &Entry : INLD.first) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // set being iterated.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
      ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
        ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (auto &Entry : NLPDI) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  DEBUG(verifyRemoved(RemInst));
}

/// Verify that the specified instruction does not occur in our internal data
/// structures.
///
/// This function verifies by asserting in debug builds.
void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
  for (const auto &DepKV : LocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : NonLocalPointerDeps) {
    assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
    for (const auto &Entry : DepKV.second.NonLocalDeps)
      assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (const auto &DepKV : NonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = DepKV.second;
    for (const auto &Entry : INLD.first)
      assert(Entry.getResult().getInst() != D &&
             "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalPtrDeps) {
    assert(DepKV.first != D && "Inst occurs in rev NLPD map");

    for (ValueIsLoadPair P : DepKV.second)
      assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
#endif
}

char MemoryDependenceAnalysis::PassID;

MemoryDependenceResults
MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  return MemoryDependenceResults(AA, AC, TLI, DT);
}
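
// Illustrative use under the new pass manager (hypothetical client code):
//   auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
//   MemDepResult Dep = MD.getDependency(&SomeMemoryInst);
// where AM is the FunctionAnalysisManager handed to the client's run method.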

char MemoryDependenceWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
  initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
}

MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() {}

void MemoryDependenceWrapperPass::releaseMemory() {
  MemDep.reset();
}

void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
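
// Illustrative use under the legacy pass manager (hypothetical client code):
// a pass adds
//   AU.addRequired<MemoryDependenceWrapperPass>();
// to its own getAnalysisUsage() and then, inside runOnFunction(), calls
//   auto &MD = getAnalysis<MemoryDependenceWrapperPass>().getMemDep();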

bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  MemDep.emplace(AA, AC, TLI, DT);
  return false;
}