//===-- MemorySSA.cpp - Memory SSA Builder --------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;
STATISTIC(NumClobberCacheLookups, "Number of Memory SSA version cache lookups");
STATISTIC(NumClobberCacheHits, "Number of Memory SSA version cache hits");
STATISTIC(NumClobberCacheInserts, "Number of MemorySSA version cache inserts");

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)
namespace llvm {
/// \brief An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                        formatted_raw_ostream &OS) {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  virtual void emitInstructionAnnot(const Instruction *I,
                                    formatted_raw_ostream &OS) {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

/// \brief A MemorySSAWalker that does AA walks and caching of lookups to
/// disambiguate accesses.
///
/// FIXME: The current implementation of this can take quadratic space in rare
/// cases. This can be fixed, but it is something to note until it is fixed.
///
/// In order to trigger this behavior, you need to store to N distinct
/// locations (that AA can prove don't alias), perform M stores to other memory
/// locations that AA can prove don't alias any of the initial N locations, and
/// then load from all of the N locations. In this case, we insert M cache
/// entries for each of the N loads.
///
/// For example:
/// define i32 @foo() {
///   %a = alloca i32, align 4
///   %b = alloca i32, align 4
///   store i32 0, i32* %a, align 4
///   store i32 0, i32* %b, align 4
///
///   ; Insert M stores to other memory that doesn't alias %a or %b here
///
///   %c = load i32, i32* %a, align 4 ; Caches M entries in
///                                   ; CachedUpwardsClobberingAccess for the
///                                   ; MemoryLocation %a
///   %d = load i32, i32* %b, align 4 ; Caches M entries in
///                                   ; CachedUpwardsClobberingAccess for the
///                                   ; MemoryLocation %b
///
///   ; For completeness' sake, loading %a or %b again would not cache *another*
///   ; M entries.
///   %r = add i32 %c, %d
///   ret i32 %r
/// }
class MemorySSA::CachingWalker final : public MemorySSAWalker {
public:
  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
  ~CachingWalker() override;

  MemoryAccess *getClobberingMemoryAccess(const Instruction *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          MemoryLocation &) override;
  void invalidateInfo(MemoryAccess *) override;

protected:
  struct UpwardsMemoryQuery;
  MemoryAccess *doCacheLookup(const MemoryAccess *, const UpwardsMemoryQuery &,
                              const MemoryLocation &);

  void doCacheInsert(const MemoryAccess *, MemoryAccess *,
                     const UpwardsMemoryQuery &, const MemoryLocation &);

  void doCacheRemove(const MemoryAccess *, const UpwardsMemoryQuery &,
                     const MemoryLocation &);

private:
  MemoryAccessPair UpwardsDFSWalk(MemoryAccess *, const MemoryLocation &,
                                  UpwardsMemoryQuery &, bool);
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
  bool instructionClobbersQuery(const MemoryDef *, UpwardsMemoryQuery &,
                                const MemoryLocation &Loc) const;
  void verifyRemoved(MemoryAccess *);
  SmallDenseMap<ConstMemoryAccessPair, MemoryAccess *>
      CachedUpwardsClobberingAccess;
  DenseMap<const MemoryAccess *, MemoryAccess *> CachedUpwardsClobberingCall;
  AliasAnalysis *AA;
  DominatorTree *DT;
};
}

namespace {
struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}
  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};
}

namespace llvm {
/// \brief Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
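///
/// As an illustrative sketch (the IR names are hypothetical; the annotations
/// follow the printing format implemented further down in this file), renaming
/// a block whose incoming value is liveOnEntry:
///
///   store i32 0, i32* %p    ; 1 = MemoryDef(liveOnEntry)
///   %v = load i32, i32* %p  ; MemoryUse(1)
///
/// The store takes the incoming value as its defining access and becomes the
/// new incoming value; the load is simply pointed at that value.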
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB,
                                     MemoryAccess *IncomingVal) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      switch (L.getValueID()) {
      case Value::MemoryUseVal:
        cast<MemoryUse>(&L)->setDefiningAccess(IncomingVal);
        break;
      case Value::MemoryDefVal:
        // We can't legally optimize defs, because we only allow single
        // memory phis/uses on operations, and if we optimize these, we can
        // end up with multiple reaching defs. Uses do not have this
        // problem, since they do not produce a value
        cast<MemoryDef>(&L)->setDefiningAccess(IncomingVal);
        IncomingVal = &L;
        break;
      case Value::MemoryPhiVal:
        IncomingVal = &L;
        break;
      }
    }
  }

  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    assert(std::find(succ_begin(BB), succ_end(BB), S) != succ_end(BB) &&
           "Must be at least one edge from BB to Succ!");
    Phi->addIncoming(IncomingVal, BB);
  }

  return IncomingVal;
}

/// \brief This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
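///
/// A note on the mechanics (descriptive, not normative): the walk below is
/// driven by an explicit worklist of (node, child iterator, incoming value)
/// triples rather than by recursion, which keeps stack usage bounded on very
/// deep dominator trees.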
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSet<BasicBlock *, 16> &Visited) {
  SmallVector<RenamePassData, 32> WorkStack;
  IncomingVal = renameBlock(Root->getBlock(), IncomingVal);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});
  Visited.insert(Root->getBlock());

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      Visited.insert(BB);
      IncomingVal = renameBlock(BB, IncomingVal);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
/// \brief Compute dominator levels, used by the phi insertion algorithm.
void MemorySSA::computeDomLevels(DenseMap<DomTreeNode *, unsigned> &DomLevels) {
  for (auto DFI = df_begin(DT->getRootNode()), DFE = df_end(DT->getRootNode());
       DFI != DFE; ++DFI)
    DomLevels[*DFI] = DFI.getPathLength() - 1;
}

/// \brief This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccesses as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      NextID(0) {
  getWalker(); // Ensure MemorySSA has been built.
}

MemorySSA::MemorySSA(MemorySSA &&MSSA)
    : AA(MSSA.AA), DT(MSSA.DT), F(MSSA.F),
      ValueToMemoryAccess(std::move(MSSA.ValueToMemoryAccess)),
      PerBlockAccesses(std::move(MSSA.PerBlockAccesses)),
      LiveOnEntryDef(std::move(MSSA.LiveOnEntryDef)),
      Walker(std::move(MSSA.Walker)), NextID(MSSA.NextID) {
  // Update the Walker MSSA pointer so it doesn't point to the moved-from MSSA
  // object any more.
  Walker->MSSA = this;
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSAWalker *MemorySSA::getWalker() {
  if (Walker)
    return Walker.get();

  Walker = make_unique<CachingWalker>(this, AA, DT);

  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
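  //
  // For instance (a hypothetical example, using the printing format defined
  // later in this file), a load through an argument pointer with no earlier
  // store would be annotated as:
  //   %v = load i32, i32* %arg   ; MemoryUse(liveOnEntry)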
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef = make_unique<MemoryDef>(F.getContext(), nullptr, nullptr,
                                          &StartingPoint, NextID++);

  // We maintain lists of memory accesses per block, trading memory for time.
  // The alternative would be to look up the memory access for every possible
  // instruction in the stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  SmallPtrSet<BasicBlock *, 32> DefUseBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I);
      if (!MUD)
        continue;
      InsertIntoDef |= isa<MemoryDef>(MUD);

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
    if (Accesses)
      DefUseBlocks.insert(&B);
  }

  // Compute live-in.
  // Live-in is normally defined as "all the blocks on the path from each def
  // to each of its uses".
  // MemoryDefs are implicit uses of previous state, so they are also uses.
  // This means we don't really have def-only instructions. The only
  // MemoryDefs that are not really uses are those that are of the LiveOnEntry
  // variable (because LiveOnEntry can reach anywhere, and every def is a
  // must-kill of LiveOnEntry).
  // In theory, you could precisely compute live-in by using alias analysis to
  // disambiguate defs and uses to see which really pair up with which.
  // In practice, this would be really expensive and difficult. So we simply
  // assume all defs are also uses that need to be kept live.
  // Because of this, the end result of this live-in computation will be "the
  // entire set of basic blocks that reach any use".
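  //
  // A small worked example (hypothetical CFG): given blocks A -> B -> C with
  // a store in B and a load in C, DefUseBlocks is {B, C}. The worklist below
  // then pulls in every transitive predecessor, so LiveInBlocks becomes
  // {A, B, C} -- all blocks that can reach a def or use.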

  SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(DefUseBlocks.begin(),
                                                    DefUseBlocks.end());
  // Now that we have a set of blocks where a value is live-in, recursively add
  // predecessors until we find the full region where the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here; insert it into the set. If it is
    // already in the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB).second)
      continue;

    // Since the value is live into BB, it is either defined in a predecessor
    // or live into it too.
    LiveInBlockWorklist.append(pred_begin(BB), pred_end(BB));
  }

  // Determine where our MemoryPhi's should go
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  IDFs.setLiveInBlocks(LiveInBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks) {
    // Insert phi node
    AccessList *Accesses = getOrCreateAccessList(BB);
    MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
    ValueToMemoryAccess.insert(std::make_pair(BB, Phi));
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 392 | // Phi's always are placed at the front of the block. |
    Accesses->push_front(Phi);
  }

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  // Now optimize the MemoryUse's defining access to point to the nearest
  // dominating clobbering def.
  // This ensures that MemoryUse's that are killed by the same store are
  // immediate users of that store, one of the invariants we guarantee.
  for (auto DomNode : depth_first(DT)) {
    BasicBlock *BB = DomNode->getBlock();
    auto AI = PerBlockAccesses.find(BB);
    if (AI == PerBlockAccesses.end())
      continue;
    AccessList *Accesses = AI->second.get();
    for (auto &MA : *Accesses) {
      if (auto *MU = dyn_cast<MemoryUse>(&MA)) {
        Instruction *Inst = MU->getMemoryInst();
        MU->setDefiningAccess(Walker->getClobberingMemoryAccess(Inst));
      }
    }
  }

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);

  return Walker.get();
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  AccessList *Accesses = getOrCreateAccessList(BB);
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  ValueToMemoryAccess.insert(std::make_pair(BB, Phi));
  // Phis are always placed at the front of the block.
  Accesses->push_front(Phi);
  return Phi;
}

MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I);
  assert(
      NewAccess != nullptr &&
      "Tried to create a memory access for a non-memory touching instruction");
  NewAccess->setDefiningAccess(Definition);
  return NewAccess;
}

MemoryAccess *MemorySSA::createMemoryAccessInBB(Instruction *I,
                                                MemoryAccess *Definition,
                                                const BasicBlock *BB,
                                                InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = createDefinedAccess(I, Definition);
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // It goes after any phi nodes
    auto AI = std::find_if(
        Accesses->begin(), Accesses->end(),
        [](const MemoryAccess &MA) { return !isa<MemoryPhi>(MA); });

    Accesses->insert(AI, NewAccess);
  } else {
    Accesses->push_back(NewAccess);
  }

  return NewAccess;
}

MemoryAccess *MemorySSA::createMemoryAccessBefore(Instruction *I,
                                                  MemoryAccess *Definition,
                                                  MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = createDefinedAccess(I, Definition);
  auto *Accesses = getOrCreateAccessList(InsertPt->getBlock());
  Accesses->insert(AccessList::iterator(InsertPt), NewAccess);
  return NewAccess;
}

MemoryAccess *MemorySSA::createMemoryAccessAfter(Instruction *I,
                                                 MemoryAccess *Definition,
                                                 MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = createDefinedAccess(I, Definition);
  auto *Accesses = getOrCreateAccessList(InsertPt->getBlock());
  Accesses->insertAfter(AccessList::iterator(InsertPt), NewAccess);
  return NewAccess;
}

/// \brief Helper function to create new memory accesses
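///
/// Roughly (a descriptive sketch of the classification below, not additional
/// behavior): an instruction that writes memory (MRI_Mod, e.g. a store, or a
/// call that may write) becomes a MemoryDef; one that only reads (MRI_Ref,
/// e.g. a simple load) becomes a MemoryUse; one that touches no memory gets
/// no access at all.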
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Ignore that fake memory dependency here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::assume)
      return nullptr;

  // Find out what effect this instruction has on memory.
  ModRefInfo ModRef = AA->getModRefInfo(I);
  bool Def = bool(ModRef & MRI_Mod);
  bool Use = bool(ModRef & MRI_Ref);

  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore such instructions.
  if (!Def && !Use)
    return nullptr;

  assert((Def || Use) &&
         "Trying to create a memory access with a non-memory instruction");

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess.insert(std::make_pair(I, MUD));
  return MUD;
}

MemoryAccess *MemorySSA::findDominatingDef(BasicBlock *UseBlock,
                                           enum InsertionPlace Where) {
  // Handle the initial case
  if (Where == Beginning)
    // The only thing that could define us at the beginning is a phi node
    if (MemoryPhi *Phi = getMemoryAccess(UseBlock))
      return Phi;

  DomTreeNode *CurrNode = DT->getNode(UseBlock);
  // Need to be defined by our dominator
  if (Where == Beginning)
    CurrNode = CurrNode->getIDom();
  Where = End;
  while (CurrNode) {
    auto It = PerBlockAccesses.find(CurrNode->getBlock());
    if (It != PerBlockAccesses.end()) {
      auto &Accesses = It->second;
      for (MemoryAccess &RA : reverse(*Accesses)) {
        if (isa<MemoryDef>(RA) || isa<MemoryPhi>(RA))
          return &RA;
      }
    }
    CurrNode = CurrNode->getIDom();
  }
  return LiveOnEntryDef.get();
}

/// \brief Returns true if \p Replacer dominates \p Replacee.
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
                             const MemoryAccess *Replacee) const {
  if (isa<MemoryUseOrDef>(Replacee))
    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
  const auto *MP = cast<MemoryPhi>(Replacee);
  // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since Replacee may appear multiple times in the phi node, we have to
  // check each operand to ensure Replacer dominates each operand where
  // Replacee occurs.
  for (const Use &Arg : MP->operands()) {
    if (Arg.get() != Replacee &&
        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
      return false;
  }
  return true;
}

/// \brief If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

/// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
///
/// Because of the way the intrusive list and use lists work, it is important
/// to do removal in the right order.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary
  if (!isa<MemoryUse>(MA))
    Walker->invalidateInfo(MA);
  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here
  Value *MemoryInst;
  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    MemoryInst = MUD->getMemoryInst();
  } else {
    MemoryInst = MA->getBlock();
  }
  ValueToMemoryAccess.erase(MemoryInst);

  auto AccessIt = PerBlockAccesses.find(MA->getBlock());
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  Accesses->erase(MA);
  if (Accesses->empty())
    PerBlockAccesses.erase(AccessIt);
}

void MemorySSA::removeMemoryAccess(MemoryAccess *MA) {
  assert(!isLiveOnEntryDef(MA) && "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  // Re-point the uses at our defining access
  if (!MA->use_empty())
    MA->replaceAllUsesWith(NewDefTarget);

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here
  removeFromLookups(MA);
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

void MemorySSA::dump() const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(dbgs(), &Writer);
}

void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
}

/// \brief Verify that the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
void MemorySSA::verifyOrdering(Function &F) const {
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    MemoryAccess *Phi = getMemoryAccess(&B);
    if (Phi)
      ActualAccesses.push_back(Phi);
    for (Instruction &I : B) {
      MemoryAccess *MA = getMemoryAccess(&I);
      assert((!MA || AL) && "We have memory affecting instructions "
                            "in this block but they are not in the "
                            "access list");
      if (MA)
        ActualAccesses.push_back(MA);
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list
    if (!AL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
  }
}

/// \brief Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks
    if (MemoryPhi *MP = getMemoryAccess(&B)) {
      for (User *U : MP->users()) {
        BasicBlock *UseBlock;
        // Phi operands are used on edges, we simulate the right domination by
        // acting as if the use occurred at the end of the predecessor block.
        if (MemoryPhi *P = dyn_cast<MemoryPhi>(U)) {
          for (const auto &Arg : P->operands()) {
            if (Arg == MP) {
              UseBlock = P->getIncomingBlock(Arg);
              break;
            }
          }
        } else {
          UseBlock = cast<MemoryAccess>(U)->getBlock();
        }
        (void)UseBlock;
        assert(DT->dominates(MP->getBlock(), UseBlock) &&
               "Memory PHI does not dominate its uses");
      }
    }

    for (Instruction &I : B) {
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (User *U : MD->users()) {
        BasicBlock *UseBlock;
        (void)UseBlock;
        // Things are allowed to flow to phi nodes over their predecessor edge.
        if (auto *P = dyn_cast<MemoryPhi>(U)) {
          for (const auto &Arg : P->operands()) {
            if (Arg == MD) {
              UseBlock = P->getIncomingBlock(Arg);
              break;
            }
          }
        } else {
          UseBlock = cast<MemoryAccess>(U)->getBlock();
        }
        assert(DT->dominates(MD->getBlock(), UseBlock) &&
               "Memory Def does not dominate its uses");
      }
    }
  }
}

/// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
///
/// llvm_unreachable is used instead of asserts because this may be called in
/// a build without asserts. In that case, we don't want this to turn into a
/// nop.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
  // The live on entry use may cause us to get a NULL def here
  if (!Def) {
    if (!isLiveOnEntryDef(Use))
      llvm_unreachable("Null def but use does not point to live on entry def");
  } else if (std::find(Def->user_begin(), Def->user_end(), Use) ==
             Def->user_end()) {
    llvm_unreachable("Did not find use in def's use list");
  }
}

/// \brief Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list
void MemorySSA::verifyDefUses(Function &F) const {
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks
    if (MemoryPhi *Phi = getMemoryAccess(&B)) {
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
    }

    for (Instruction &I : B) {
      if (MemoryAccess *MA = getMemoryAccess(&I)) {
        assert(isa<MemoryUseOrDef>(MA) &&
               "Found a phi node not attached to a bb");
        verifyUseInDefs(cast<MemoryUseOrDef>(MA)->getDefiningAccess(), MA);
      }
    }
  }
}

MemoryAccess *MemorySSA::getMemoryAccess(const Value *I) const {
  return ValueToMemoryAccess.lookup(I);
}

MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
  return cast_or_null<MemoryPhi>(getMemoryAccess((const Value *)BB));
}

/// \brief Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
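///
/// For illustration (hypothetical accesses): in a block containing
/// 1 = MemoryDef(liveOnEntry) followed by MemoryUse(1), the def locally
/// dominates the use because it appears earlier in the block's access list.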
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {

  assert((Dominator->getBlock() == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");

  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // When Dominatee is defined on function entry, it is not dominated by
  // another memory access.
  if (isLiveOnEntryDef(Dominatee))
    return false;

  // When Dominator is defined on function entry, it dominates the other
  // memory access.
  if (isLiveOnEntryDef(Dominator))
    return true;

  // Get the access list for the block
  const AccessList *AccessList = getBlockAccesses(Dominator->getBlock());
  AccessList::const_reverse_iterator It(Dominator->getIterator());

  // If we hit the beginning of the access list before we hit dominatee, we
  // must dominate it
  return std::none_of(It, AccessList->rend(),
                      [&](const MemoryAccess &MA) { return &MA == Dominatee; });
}

const static char LiveOnEntryStr[] = "liveOnEntry";

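// For reference (a descriptive note, with hypothetical IDs and block names):
// the print methods below produce annotations of the following shape:
//   2 = MemoryPhi({entry,liveOnEntry},{if.then,1})
//   1 = MemoryDef(liveOnEntry)
//   MemoryUse(1)
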
void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  OS << getID() << " = MemoryDef(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';
}

void MemoryPhi::print(raw_ostream &OS) const {
  bool First = true;
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    if (!First)
      OS << ',';
    else
      First = false;

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

MemoryAccess::~MemoryAccess() {}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';
}

void MemoryAccess::dump() const {
  print(dbgs());
  dbgs() << "\n";
}

char MemorySSAAnalysis::PassID;

MemorySSA MemorySSAAnalysis::run(Function &F, AnalysisManager<Function> &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSA(F, &AA, &DT);
}

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).print(OS);

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).verifyMemorySSA();

  return PreservedAnalyses::all();
}

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}

void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
                                        DominatorTree *D)
    : MemorySSAWalker(M), AA(A), DT(D) {}

MemorySSA::CachingWalker::~CachingWalker() {}

struct MemorySSA::CachingWalker::UpwardsMemoryQuery {
  // True if we saw a phi whose predecessor was a backedge
  bool SawBackedgePhi;
  // True if our original query started off as a call
  bool IsCall;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst;
  // Set of visited Instructions for this query.
  DenseSet<MemoryAccessPair> Visited;
  // Vector of visited call accesses for this query. This is separated out
  // because you can always cache and lookup the result of call queries (IE when
  // IsCall == true) for every call in the chain. The calls have no AA location
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 953 | // associated with them with them, and thus, no context dependence. |
  SmallVector<const MemoryAccess *, 32> VisitedCalls;
  // The MemoryAccess we actually got called with, used to test local
  // domination.
  const MemoryAccess *OriginalAccess;

  UpwardsMemoryQuery()
      : SawBackedgePhi(false), IsCall(false), Inst(nullptr),
        OriginalAccess(nullptr) {}

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : SawBackedgePhi(false), IsCall(ImmutableCallSite(Inst)), Inst(Inst),
        OriginalAccess(Access) {}
};

void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
  // TODO: We can do much better cache invalidation with differently stored
  // caches. For now, for MemoryUses, we simply remove them from the cache, and
  // we kill the entire call/non-call cache for everything else. The problem is
  // that for phis or defs, we'd need to follow use chains down and invalidate
  // anything below us in the chain that currently terminates at this access.

  // If this is a MemoryUse, just remove the cached info. A MemoryUse is by
  // definition never a barrier, so nothing in the cache can point to this use.
  // In that case, we only need to invalidate the info for the use itself.

  if (MemoryUse *MU = dyn_cast<MemoryUse>(MA)) {
    UpwardsMemoryQuery Q;
    Instruction *I = MU->getMemoryInst();
    Q.IsCall = bool(ImmutableCallSite(I));
    Q.Inst = I;
    if (!Q.IsCall)
      Q.StartingLoc = MemoryLocation::get(I);
    doCacheRemove(MA, Q, Q.StartingLoc);
  } else {
    // If it is not a use, the best we can do right now is destroy the cache.
    CachedUpwardsClobberingCall.clear();
    CachedUpwardsClobberingAccess.clear();
  }

#ifdef EXPENSIVE_CHECKS
  // Run this only when expensive checks are enabled.
  verifyRemoved(MA);
#endif
}

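// A descriptive note on the two caches manipulated below: call queries are
// keyed on the access alone, because a call's clobber result does not depend
// on a pointer location; every other query is keyed on the (access, location)
// pair.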
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 1001 | void MemorySSA::CachingWalker::doCacheRemove(const MemoryAccess *M, |
| 1002 | const UpwardsMemoryQuery &Q, |
| 1003 | const MemoryLocation &Loc) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1004 | if (Q.IsCall) |
| 1005 | CachedUpwardsClobberingCall.erase(M); |
| 1006 | else |
| 1007 | CachedUpwardsClobberingAccess.erase({M, Loc}); |
| 1008 | } |
| 1009 | |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 1010 | void MemorySSA::CachingWalker::doCacheInsert(const MemoryAccess *M, |
| 1011 | MemoryAccess *Result, |
| 1012 | const UpwardsMemoryQuery &Q, |
| 1013 | const MemoryLocation &Loc) { |
George Burgess IV | 1b1fef3 | 2016-04-29 18:42:55 +0000 | [diff] [blame] | 1014 | // This is fine for Phis, since there are times where we can't optimize them. |
| 1015 | // Making a def its own clobber is never correct, though. |
| 1016 | assert((Result != M || isa<MemoryPhi>(M)) && |
| 1017 | "Something can't clobber itself!"); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1018 | ++NumClobberCacheInserts; |
| 1019 | if (Q.IsCall) |
| 1020 | CachedUpwardsClobberingCall[M] = Result; |
| 1021 | else |
| 1022 | CachedUpwardsClobberingAccess[{M, Loc}] = Result; |
| 1023 | } |
| 1024 | |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 1025 | MemoryAccess * |
| 1026 | MemorySSA::CachingWalker::doCacheLookup(const MemoryAccess *M, |
| 1027 | const UpwardsMemoryQuery &Q, |
| 1028 | const MemoryLocation &Loc) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1029 | ++NumClobberCacheLookups; |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 1030 | MemoryAccess *Result; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1031 | |
| 1032 | if (Q.IsCall) |
| 1033 | Result = CachedUpwardsClobberingCall.lookup(M); |
| 1034 | else |
| 1035 | Result = CachedUpwardsClobberingAccess.lookup({M, Loc}); |
| 1036 | |
| 1037 | if (Result) |
| 1038 | ++NumClobberCacheHits; |
| 1039 | return Result; |
| 1040 | } |
| 1041 | |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 1042 | bool MemorySSA::CachingWalker::instructionClobbersQuery( |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1043 | const MemoryDef *MD, UpwardsMemoryQuery &Q, |
| 1044 | const MemoryLocation &Loc) const { |
| 1045 | Instruction *DefMemoryInst = MD->getMemoryInst(); |
| 1046 | assert(DefMemoryInst && "Defining instruction not actually an instruction"); |
| 1047 | |
| 1048 | if (!Q.IsCall) |
| 1049 | return AA->getModRefInfo(DefMemoryInst, Loc) & MRI_Mod; |
| 1050 | |
| 1051 | // If this is a call, mark it for caching |
| 1052 | if (ImmutableCallSite(DefMemoryInst)) |
George Burgess IV | 49cad7d | 2016-03-30 03:12:08 +0000 | [diff] [blame] | 1053 | Q.VisitedCalls.push_back(MD); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1054 | ModRefInfo I = AA->getModRefInfo(DefMemoryInst, ImmutableCallSite(Q.Inst)); |
| 1055 | return I != MRI_NoModRef; |
| 1056 | } |
| 1057 | |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 1058 | MemoryAccessPair MemorySSA::CachingWalker::UpwardsDFSWalk( |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1059 | MemoryAccess *StartingAccess, const MemoryLocation &Loc, |
| 1060 | UpwardsMemoryQuery &Q, bool FollowingBackedge) { |
| 1061 | MemoryAccess *ModifyingAccess = nullptr; |
| 1062 | |
| 1063 | auto DFI = df_begin(StartingAccess); |
| 1064 | for (auto DFE = df_end(StartingAccess); DFI != DFE;) { |
| 1065 | MemoryAccess *CurrAccess = *DFI; |
| 1066 | if (MSSA->isLiveOnEntryDef(CurrAccess)) |
| 1067 | return {CurrAccess, Loc}; |
George Burgess IV | 1b1fef3 | 2016-04-29 18:42:55 +0000 | [diff] [blame] | 1068 | // If this is a MemoryDef, check whether it clobbers our current query. This |
| 1069 | // needs to be done before consulting the cache, because the cache reports |
| 1070 | // the clobber for CurrAccess. If CurrAccess is a clobber for this query, |
| 1071 | // and we ask the cache for information first, then we might skip this |
| 1072 | // clobber, which is bad. |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1073 | if (auto *MD = dyn_cast<MemoryDef>(CurrAccess)) { |
| 1074 | // If we hit the top, stop following this path. |
| 1075 | // While we can do lookups, we can't sanely do inserts here unless we were |
| 1076 | // to track everything we saw along the way, since we don't know where we |
| 1077 | // will stop. |
| 1078 | if (instructionClobbersQuery(MD, Q, Loc)) { |
| 1079 | ModifyingAccess = CurrAccess; |
| 1080 | break; |
| 1081 | } |
| 1082 | } |
George Burgess IV | 1b1fef3 | 2016-04-29 18:42:55 +0000 | [diff] [blame] | 1083 | if (auto CacheResult = doCacheLookup(CurrAccess, Q, Loc)) |
| 1084 | return {CacheResult, Loc}; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1085 | |
| 1086 | // Phis need special handling below so we can track backedges; for anything
| 1087 | // else, just keep walking the upward defs.
| 1088 | if (!isa<MemoryPhi>(CurrAccess)) { |
| 1089 | ++DFI; |
| 1090 | continue; |
| 1091 | } |
| 1092 | |
George Burgess IV | 0e48986 | 2016-03-23 18:31:55 +0000 | [diff] [blame] | 1093 | #ifndef NDEBUG |
| 1094 | // The loop below visits the phi's children for us. Because phis are the
| 1095 | // only things with multiple edges, skipping the children should always take
| 1096 | // the DFS straight to its end.
| 1097 | // |
| 1098 | // Use a copy of DFI because skipChildren would kill our search stack, which |
| 1099 | // would make caching anything on the way back impossible. |
| 1100 | auto DFICopy = DFI; |
| 1101 | assert(DFICopy.skipChildren() == DFE && |
| 1102 | "Skipping phi's children doesn't end the DFS?"); |
| 1103 | #endif |
| 1104 | |
George Burgess IV | 82ee942 | 2016-03-30 00:26:26 +0000 | [diff] [blame] | 1105 | const MemoryAccessPair PHIPair(CurrAccess, Loc); |
| 1106 | |
| 1107 | // Don't try to optimize this phi again if we've already tried to do so. |
| 1108 | if (!Q.Visited.insert(PHIPair).second) { |
| 1109 | ModifyingAccess = CurrAccess; |
| 1110 | break; |
| 1111 | } |
| 1112 | |
George Burgess IV | 49cad7d | 2016-03-30 03:12:08 +0000 | [diff] [blame] | 1113 | std::size_t InitialVisitedCallSize = Q.VisitedCalls.size(); |
| 1114 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1115 | // Recurse on PHI nodes, since we need to change locations. |
| 1116 | // TODO: Allow graphtraits on pairs, which would turn this whole function |
| 1117 | // into a normal single depth first walk. |
| 1118 | MemoryAccess *FirstDef = nullptr; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1119 | for (auto MPI = upward_defs_begin(PHIPair), MPE = upward_defs_end(); |
| 1120 | MPI != MPE; ++MPI) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1121 | bool Backedge = |
| 1122 | !FollowingBackedge && |
| 1123 | DT->dominates(CurrAccess->getBlock(), MPI.getPhiArgBlock()); |
| 1124 | |
| 1125 | MemoryAccessPair CurrentPair = |
| 1126 | UpwardsDFSWalk(MPI->first, MPI->second, Q, Backedge); |
| 1127 | // All the phi arguments should reach the same point if we can bypass |
| 1128 | // this phi. The alternative is that they hit this phi node, which |
| 1129 | // means we can skip this argument. |
| 1130 | if (FirstDef && CurrentPair.first != PHIPair.first && |
| 1131 | CurrentPair.first != FirstDef) { |
| 1132 | ModifyingAccess = CurrAccess; |
| 1133 | break; |
| 1134 | } |
| 1135 | |
| 1136 | if (!FirstDef) |
| 1137 | FirstDef = CurrentPair.first; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1138 | } |
| 1139 | |
George Burgess IV | 0e48986 | 2016-03-23 18:31:55 +0000 | [diff] [blame] | 1140 | // If the argument loop broke out early, keep the ModifyingAccess it set; otherwise all arguments reached FirstDef, so the phi can be bypassed.
| 1141 | if (!ModifyingAccess) { |
George Burgess IV | 82ee942 | 2016-03-30 00:26:26 +0000 | [diff] [blame] | 1142 | assert(FirstDef && "Found a Phi with no upward defs?"); |
| 1143 | ModifyingAccess = FirstDef; |
George Burgess IV | 49cad7d | 2016-03-30 03:12:08 +0000 | [diff] [blame] | 1144 | } else { |
| 1145 | // If we can't optimize this Phi, then we can't safely cache any of the |
| 1146 | // calls we visited when trying to optimize it. Wipe them out now. |
| 1147 | Q.VisitedCalls.resize(InitialVisitedCallSize); |
George Burgess IV | 0e48986 | 2016-03-23 18:31:55 +0000 | [diff] [blame] | 1148 | } |
| 1149 | break; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1150 | } |
| 1151 | |
| 1152 | if (!ModifyingAccess) |
| 1153 | return {MSSA->getLiveOnEntryDef(), Q.StartingLoc}; |
| 1154 | |
George Burgess IV | 0e48986 | 2016-03-23 18:31:55 +0000 | [diff] [blame] | 1155 | const BasicBlock *OriginalBlock = StartingAccess->getBlock(); |
George Burgess IV | 1b1fef3 | 2016-04-29 18:42:55 +0000 | [diff] [blame] | 1156 | assert(DFI.getPathLength() > 0 && "We dropped our path?"); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1157 | unsigned N = DFI.getPathLength(); |
George Burgess IV | 1b1fef3 | 2016-04-29 18:42:55 +0000 | [diff] [blame] | 1158 | // If we found a clobbering def, the last element in the path will be our |
| 1159 | // clobber, so we don't want to cache that to itself. OTOH, if we optimized a |
| 1160 | // phi, we can add the last thing in the path to the cache, since that won't |
| 1161 | // be the result. |
| 1162 | if (DFI.getPath(N - 1) == ModifyingAccess) |
| 1163 | --N; |
| 1164 | for (; N > 1; --N) { |
George Burgess IV | 0e48986 | 2016-03-23 18:31:55 +0000 | [diff] [blame] | 1165 | MemoryAccess *CacheAccess = DFI.getPath(N - 1); |
| 1166 | BasicBlock *CurrBlock = CacheAccess->getBlock(); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1167 | if (!FollowingBackedge) |
George Burgess IV | 0e48986 | 2016-03-23 18:31:55 +0000 | [diff] [blame] | 1168 | doCacheInsert(CacheAccess, ModifyingAccess, Q, Loc); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1169 | if (DT->dominates(CurrBlock, OriginalBlock) && |
| 1170 | (CurrBlock != OriginalBlock || !FollowingBackedge || |
George Burgess IV | 0e48986 | 2016-03-23 18:31:55 +0000 | [diff] [blame] | 1171 | MSSA->locallyDominates(CacheAccess, StartingAccess))) |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1172 | break; |
| 1173 | } |
| 1174 | |
| 1175 | // Cache everything else on the way back. The caller should cache |
George Burgess IV | 1b1fef3 | 2016-04-29 18:42:55 +0000 | [diff] [blame] | 1176 | // StartingAccess for us. |
| 1177 | for (; N > 1; --N) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1178 | MemoryAccess *CacheAccess = DFI.getPath(N - 1); |
| 1179 | doCacheInsert(CacheAccess, ModifyingAccess, Q, Loc); |
| 1180 | } |
| 1181 | assert(Q.Visited.size() < 1000 && "Visited too much"); |
| 1182 | |
| 1183 | return {ModifyingAccess, Loc}; |
| 1184 | } |
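// A sketch of the phi-optimization case handled above (hypothetical CFG):
//
//            entry: 1 = MemoryDef(liveOnEntry)
//            /                \
//   if.then: (no defs)   if.else: (no defs)
//            \                /
//     merge: 2 = MemoryPhi({if.then, 1}, {if.else, 1})
//
// Walking upward from a use in 'merge', both phi arguments reach access 1,
// so FirstDef is 1 for every argument and the walk returns 1 instead of
// stopping at the phi. If the arms reached different defs, the argument loop
// above breaks early and the phi itself is the clobber.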
| 1185 | |
| 1186 | /// \brief Walk the use-def chains starting at \p StartingAccess and find
| 1187 | /// the MemoryAccess that actually clobbers Loc.
| 1188 | /// |
| 1189 | /// \returns our clobbering memory access |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 1190 | MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess( |
| 1191 | MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1192 | return UpwardsDFSWalk(StartingAccess, Q.StartingLoc, Q, false).first; |
| 1193 | } |
| 1194 | |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 1195 | MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess( |
| 1196 | MemoryAccess *StartingAccess, MemoryLocation &Loc) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1197 | if (isa<MemoryPhi>(StartingAccess)) |
| 1198 | return StartingAccess; |
| 1199 | |
| 1200 | auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess); |
| 1201 | if (MSSA->isLiveOnEntryDef(StartingUseOrDef)) |
| 1202 | return StartingUseOrDef; |
| 1203 | |
| 1204 | Instruction *I = StartingUseOrDef->getMemoryInst(); |
| 1205 | |
| 1206 | // Conservatively, fences are always clobbers, so don't perform the walk if we |
| 1207 | // hit a fence. |
| 1208 | if (isa<FenceInst>(I)) |
| 1209 | return StartingUseOrDef; |
| 1210 | |
| 1211 | UpwardsMemoryQuery Q; |
| 1212 | Q.OriginalAccess = StartingUseOrDef; |
| 1213 | Q.StartingLoc = Loc; |
| 1214 | Q.Inst = StartingUseOrDef->getMemoryInst(); |
| 1215 | Q.IsCall = false; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1216 | |
| 1217 | if (auto CacheResult = doCacheLookup(StartingUseOrDef, Q, Q.StartingLoc)) |
| 1218 | return CacheResult; |
| 1219 | |
| 1220 | // Unlike the Instruction-based overload, do not walk to the def of a def,
| 1221 | // because we are handed something we already believe is the clobbering access.
| 1222 | MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef) |
| 1223 | ? StartingUseOrDef->getDefiningAccess() |
| 1224 | : StartingUseOrDef; |
| 1225 | |
| 1226 | MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q); |
George Burgess IV | 1b1fef3 | 2016-04-29 18:42:55 +0000 | [diff] [blame] | 1227 | // Only cache this if it wouldn't make Clobber point to itself. |
| 1228 | if (Clobber != StartingAccess) |
| 1229 | doCacheInsert(Q.OriginalAccess, Clobber, Q, Q.StartingLoc); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1230 | DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is "); |
| 1231 | DEBUG(dbgs() << *StartingUseOrDef << "\n"); |
| 1232 | DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is "); |
| 1233 | DEBUG(dbgs() << *Clobber << "\n"); |
| 1234 | return Clobber; |
| 1235 | } |
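// A hypothetical usage sketch for the location-based overload above; the
// variable names are illustrative, not taken from this file:
//
//   MemorySSAWalker *Walker = MSSA.getWalker();
//   MemoryLocation OtherLoc = MemoryLocation::get(OtherLoad);
//   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(
//       MSSA.getMemoryAccess(SomeStore), OtherLoc);
//
// This asks what clobbers OtherLoc as seen from SomeStore's position, which
// need not be the same as SomeStore's own clobber.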
| 1236 | |
| 1237 | MemoryAccess * |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 1238 | MemorySSA::CachingWalker::getClobberingMemoryAccess(const Instruction *I) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1239 | // There should be no way to look up an instruction and get a phi as the
| 1240 | // access, since we only map BBs to MemoryPhis. So this must be a use or def.
| 1241 | auto *StartingAccess = cast<MemoryUseOrDef>(MSSA->getMemoryAccess(I)); |
| 1242 | |
| 1243 | // We can't sanely do anything with a FenceInst; fences conservatively
| 1244 | // clobber all memory and have no MemoryLocation to get a pointer from to
| 1245 | // try to disambiguate.
| 1246 | if (isa<FenceInst>(I)) |
| 1247 | return StartingAccess; |
| 1248 | |
| 1249 | UpwardsMemoryQuery Q; |
| 1250 | Q.OriginalAccess = StartingAccess; |
| 1251 | Q.IsCall = bool(ImmutableCallSite(I)); |
| 1252 | if (!Q.IsCall) |
| 1253 | Q.StartingLoc = MemoryLocation::get(I); |
| 1254 | Q.Inst = I; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1255 | if (auto CacheResult = doCacheLookup(StartingAccess, Q, Q.StartingLoc)) |
| 1256 | return CacheResult; |
| 1257 | |
| 1258 | // Start with the thing we already think clobbers this location.
| 1259 | MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess(); |
| 1260 | |
| 1261 | // At this point, DefiningAccess may be the live on entry def. |
| 1262 | // If it is, we will not get a better result. |
| 1263 | if (MSSA->isLiveOnEntryDef(DefiningAccess)) |
| 1264 | return DefiningAccess; |
| 1265 | |
| 1266 | MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q); |
George Burgess IV | 1b1fef3 | 2016-04-29 18:42:55 +0000 | [diff] [blame] | 1267 | // DFS won't cache a result for DefiningAccess. So, if DefiningAccess isn't |
| 1268 | // our clobber, be sure that it gets a cache entry, too. |
| 1269 | if (Result != DefiningAccess) |
| 1270 | doCacheInsert(DefiningAccess, Result, Q, Q.StartingLoc); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1271 | doCacheInsert(Q.OriginalAccess, Result, Q, Q.StartingLoc); |
| 1272 | // TODO: When this implementation is more mature, we may want to figure out |
| 1273 | // what this additional caching buys us. It's most likely A Good Thing. |
| 1274 | if (Q.IsCall) |
| 1275 | for (const MemoryAccess *MA : Q.VisitedCalls) |
George Burgess IV | 1b1fef3 | 2016-04-29 18:42:55 +0000 | [diff] [blame] | 1276 | if (MA != Result) |
| 1277 | doCacheInsert(MA, Result, Q, Q.StartingLoc); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1278 | |
| 1279 | DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is "); |
| 1280 | DEBUG(dbgs() << *DefiningAccess << "\n"); |
| 1281 | DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is "); |
| 1282 | DEBUG(dbgs() << *Result << "\n"); |
| 1283 | |
| 1284 | return Result; |
| 1285 | } |
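// A hypothetical client-side sketch for the Instruction overload above,
// assuming a pass that has already built MemorySSA for the function:
//
//   if (auto *LI = dyn_cast<LoadInst>(&I)) {
//     MemoryAccess *Clobber =
//         MSSA.getWalker()->getClobberingMemoryAccess(LI);
//     if (MSSA.isLiveOnEntryDef(Clobber))
//       ; // Nothing in this function writes the loaded location.
//   }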
| 1286 | |
Geoff Berry | 9fe26e6 | 2016-04-22 14:44:10 +0000 | [diff] [blame] | 1287 | // Verify that MA doesn't exist in any of the caches. |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 1288 | void MemorySSA::CachingWalker::verifyRemoved(MemoryAccess *MA) { |
Geoff Berry | 9fe26e6 | 2016-04-22 14:44:10 +0000 | [diff] [blame] | 1289 | #ifndef NDEBUG |
| 1290 | for (auto &P : CachedUpwardsClobberingAccess) |
| 1291 | assert(P.first.first != MA && P.second != MA && |
| 1292 | "Found removed MemoryAccess in cache."); |
| 1293 | for (auto &P : CachedUpwardsClobberingCall) |
| 1294 | assert(P.first != MA && P.second != MA && |
| 1295 | "Found removed MemoryAccess in cache."); |
| 1296 | #endif // !NDEBUG |
| 1297 | } |
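// This check is assumed to run after an access has been erased (e.g. from
// MemorySSA's removal path, once the caches have been invalidated); a stale
// cache entry pointing at a deleted access would otherwise be a
// use-after-free waiting to happen.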
| 1298 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1299 | MemoryAccess * |
| 1300 | DoNothingMemorySSAWalker::getClobberingMemoryAccess(const Instruction *I) { |
| 1301 | MemoryAccess *MA = MSSA->getMemoryAccess(I); |
| 1302 | if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) |
| 1303 | return Use->getDefiningAccess(); |
| 1304 | return MA; |
| 1305 | } |
| 1306 | |
| 1307 | MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( |
| 1308 | MemoryAccess *StartingAccess, MemoryLocation &) { |
| 1309 | if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) |
| 1310 | return Use->getDefiningAccess(); |
| 1311 | return StartingAccess; |
| 1312 | } |
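// Behavioral contrast with the caching walker (hypothetical IR):
//
//   store i32 0, i32* %p        ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, i32* %q        ; 2 = MemoryDef(1)
//   %v = load i32, i32* %p      ; MemoryUse(2)
//
// Assuming %p and %q don't alias, DoNothingMemorySSAWalker returns 2 (the
// load's defining access, taken at face value), while CachingWalker walks
// past the unrelated store to %q and returns 1.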
| 1313 | } // end namespace llvm