//===-- MemorySSA.cpp - Memory SSA Builder---------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------===//
#include "llvm/Transforms/Utils/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Transforms/Scalar.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;
STATISTIC(NumClobberCacheLookups, "Number of Memory SSA version cache lookups");
STATISTIC(NumClobberCacheHits, "Number of Memory SSA version cache hits");
STATISTIC(NumClobberCacheInserts, "Number of MemorySSA version cache inserts");

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

static cl::opt<bool>
    VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
                    cl::desc("Verify MemorySSA in legacy printer pass."));

namespace llvm {
/// \brief An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                        formatted_raw_ostream &OS) {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  virtual void emitInstructionAnnot(const Instruction *I,
                                    formatted_raw_ostream &OS) {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};
}

namespace {
/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a DenseMap key in the use optimizer.
class MemoryLocOrCall {
public:
  MemoryLocOrCall() : IsCall(false) {}
  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (ImmutableCallSite(Inst)) {
      IsCall = true;
      CS = ImmutableCallSite(Inst);
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence inst, and it
      // is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc)
      : IsCall(false), Loc(Loc) {}

  bool IsCall;
  ImmutableCallSite getCS() const {
    assert(IsCall);
    return CS;
  }
  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (IsCall)
      return CS.getCalledValue() == Other.CS.getCalledValue();
    return Loc == Other.Loc;
  }

private:
  // FIXME: MSVC 2013 does not properly implement C++11 union rules; once we
  // require newer versions, this should be made an anonymous union again.
  ImmutableCallSite CS;
  MemoryLocation Loc;
};
}

namespace llvm {
template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }
  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }
  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (MLOC.IsCall)
      return hash_combine(MLOC.IsCall,
                          DenseMapInfo<const Value *>::getHashValue(
                              MLOC.getCS().getCalledValue()));
    return hash_combine(
        MLOC.IsCall, DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
  }
  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};
}
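
// A sketch of how this is meant to be consumed (hypothetical names, not part
// of the original source): the specialization above lets MemoryLocOrCall key
// a DenseMap directly, e.g. to memoize a result per location-or-call:
//   // Given a MemoryUseOrDef *MUD:
//   DenseMap<MemoryLocOrCall, MemoryAccess *> LastClobber;
//   LastClobber[MemoryLocOrCall(MUD)] = MUD->getDefiningAccess();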

namespace {
struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess;

  UpwardsMemoryQuery()
      : IsCall(false), Inst(nullptr), OriginalAccess(nullptr) {}

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           AliasAnalysis &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
    default:
      return false;
    }
  }
  return false;
}
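
// For example (illustrative IR, %buf is hypothetical): for a MemoryDef whose
// instruction is
//   call void @llvm.lifetime.end(i64 4, i8* %buf)
// this returns true for any Loc that must-aliases %buf, since the pointer is
// argument operand 1 of the lifetime intrinsics.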

enum class Reorderability { Always, IfNoAlias, Never };

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static Reorderability getLoadReorderability(const LoadInst *Use,
                                            const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return Reorderability::Never;

  // The lang ref allows reordering of volatile and non-volatile operations.
  // Whether an aliasing nonvolatile load and volatile load can be reordered,
  // though, is ambiguous. Because it may not be best to exploit this ambiguity,
  // we only allow volatile/non-volatile reordering if the volatile and
  // non-volatile operations don't alias.
  Reorderability Result = VolatileUse || VolatileClobber
                              ? Reorderability::IfNoAlias
                              : Reorderability::Always;

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  if (SeqCstUse || MayClobberIsAcquire)
    return Reorderability::Never;
  return Result;
}
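
// Worked example (descriptive only): two plain non-volatile, non-atomic loads
// yield Always; a volatile Use against a non-volatile MayClobber (or vice
// versa) yields IfNoAlias; a seq_cst Use or an acquire-or-stronger MayClobber
// yields Never, as do two volatile operations.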

static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  //
  // FIXME: We should handle invariant groups, as well. It's a bit harder,
  // because we need to pay close attention to invariant group barriers.
  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(I));
}
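
// For instance (illustrative IR): both loads below are trivially clobbered
// only by liveOnEntry, the first because of its !invariant.load metadata, the
// second because @g points to constant memory:
//   %a = load i32, i32* %p, !invariant.load !0
//   %b = load i32, i32* @g      ; with @g a "constant" global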

static bool instructionClobbersQuery(MemoryDef *MD,
                                     const MemoryLocation &UseLoc,
                                     const Instruction *UseInst,
                                     AliasAnalysis &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return false;
    default:
      break;
    }
  }

  ImmutableCallSite UseCS(UseInst);
  if (UseCS) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
    return I != MRI_NoModRef;
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) {
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) {
      switch (getLoadReorderability(UseLoad, DefLoad)) {
      case Reorderability::Always:
        return false;
      case Reorderability::Never:
        return true;
      case Reorderability::IfNoAlias:
        return !AA.isNoAlias(UseLoc, MemoryLocation::get(DefLoad));
      }
    }
  }

  return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod;
}

static bool instructionClobbersQuery(MemoryDef *MD, MemoryUse *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysis &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}
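
// For example (illustrative): if the def is "store i32 1, i32* %p" and the
// query is a load of %p, the final getModRefInfo check sees a Mod effect and
// instructionClobbersQuery returns true; if the def is an llvm.assume call,
// the intrinsic switch returns false no matter what the use is.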

/// Cache for our caching MemorySSA walker.
class WalkerCache {
  DenseMap<ConstMemoryAccessPair, MemoryAccess *> Accesses;
  DenseMap<const MemoryAccess *, MemoryAccess *> Calls;

public:
  MemoryAccess *lookup(const MemoryAccess *MA, const MemoryLocation &Loc,
                       bool IsCall) const {
    ++NumClobberCacheLookups;
    MemoryAccess *R = IsCall ? Calls.lookup(MA) : Accesses.lookup({MA, Loc});
    if (R)
      ++NumClobberCacheHits;
    return R;
  }

  bool insert(const MemoryAccess *MA, MemoryAccess *To,
              const MemoryLocation &Loc, bool IsCall) {
    // This is fine for Phis, since there are times where we can't optimize
    // them. Making a def its own clobber is never correct, though.
    assert((MA != To || isa<MemoryPhi>(MA)) &&
           "Something can't clobber itself!");

    ++NumClobberCacheInserts;
    bool Inserted;
    if (IsCall)
      Inserted = Calls.insert({MA, To}).second;
    else
      Inserted = Accesses.insert({{MA, Loc}, To}).second;

    return Inserted;
  }

  bool remove(const MemoryAccess *MA, const MemoryLocation &Loc, bool IsCall) {
    return IsCall ? Calls.erase(MA) : Accesses.erase({MA, Loc});
  }

  void clear() {
    Accesses.clear();
    Calls.clear();
  }

  bool contains(const MemoryAccess *MA) const {
    for (auto &P : Accesses)
      if (P.first.first == MA || P.second == MA)
        return true;
    for (auto &P : Calls)
      if (P.first == MA || P.second == MA)
        return true;
    return false;
  }
};
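
// Typical use (a sketch with hypothetical names): once a walk resolves the
// clobber of Start for Loc, the result is memoized and short-circuits the
// next query:
//   WalkerCache WC;
//   WC.insert(Start, Clobber, Loc, /*IsCall=*/false);
//   if (MemoryAccess *Hit = WC.lookup(Start, Loc, /*IsCall=*/false))
//     return Hit;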

/// Walks the defining uses of MemoryDefs. Stops after we hit something that has
/// no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when comparing
/// against a null def_chain_iterator, this will compare equal only after
/// walking said Phi/liveOnEntry.
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator, std::forward_iterator_tag,
                                  MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(MemoryAccess *MA) : MA(MA) {}

  MemoryAccess *operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MA = MUD->getDefiningAccess();
    else
      MA = nullptr;
    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  MemoryAccess *MA;
};

static iterator_range<def_chain_iterator>
def_chain(MemoryAccess *MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator(MA), def_chain_iterator(UpTo));
}
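
// Usage sketch (illustrative): walk upward from an access until the nearest
// MemoryPhi, falling off the end at liveOnEntry (whose defining access is
// null):
//   for (MemoryAccess *A : def_chain(MA))
//     if (isa<MemoryPhi>(A))
//       return A;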

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc The MemoryLocation for Start.
/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
static void LLVM_ATTRIBUTE_UNUSED
checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<MemoryAccessPair> VisitedPhis;
  SmallVector<MemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    MemoryAccessPair MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers
    // Start. We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (MemoryAccess *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber =
              FoundClobber || MSSA.isLiveOnEntryDef(MD) ||
              instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (auto *MD = dyn_cast<MemoryDef>(MA)) {
        (void)MD;
        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
    }
  }

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    // N.B. Blocker is currently basically unused. The goal is to use it to make
    // cache invalidation better, but we're not there yet.
    MemoryAccess *Blocker;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysis &AA;
  DominatorTree &DT;
  WalkerCache &WC;
  UpwardsMemoryQuery *Query;
  bool UseCache;

  // Phi optimization bookkeeping
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  DenseMap<const BasicBlock *, MemoryAccess *> WalkTargetCache;

  void setUseCache(bool Use) { UseCache = Use; }
  bool shouldIgnoreCache() const {
    // UseCache will only be false when we're debugging, or when expensive
    // checks are enabled. In either case, we don't care deeply about speed.
    return LLVM_UNLIKELY(!UseCache);
  }

  void addCacheEntry(const MemoryAccess *What, MemoryAccess *To,
                     const MemoryLocation &Loc) const {
// EXPENSIVE_CHECKS because most of these queries are redundant.
#ifdef EXPENSIVE_CHECKS
    assert(MSSA.dominates(To, What));
#endif
    if (shouldIgnoreCache())
      return;
    WC.insert(What, To, Loc, Query->IsCall);
  }

  MemoryAccess *lookupCache(const MemoryAccess *MA, const MemoryLocation &Loc) {
    return shouldIgnoreCache() ? nullptr : WC.lookup(MA, Loc, Query->IsCall);
  }

  void cacheDefPath(const DefPath &DN, MemoryAccess *Target) const {
    if (shouldIgnoreCache())
      return;

    for (MemoryAccess *MA : def_chain(DN.First, DN.Last))
      addCacheEntry(MA, Target, DN.Loc);

    // DefPaths only express the path we walked. So, DN.Last could either be a
    // thing we want to cache, or not.
    if (DN.Last != Target)
      addCacheEntry(DN.Last, Target, DN.Loc);
  }

  /// Find the nearest def or phi that `From` can legally be optimized to.
  ///
  /// FIXME: Deduplicate this with MSSA::findDominatingDef. Ideally, MSSA should
  /// keep track of this information for us, and allow us O(1) lookups of this
  /// info.
  MemoryAccess *getWalkTarget(const MemoryPhi *From) {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    auto At = WalkTargetCache.find(BB);
    if (At != WalkTargetCache.end())
      return At->second;

    SmallVector<const BasicBlock *, 8> ToCache;
    ToCache.push_back(BB);

    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto At = WalkTargetCache.find(Node->getBlock());
      if (At != WalkTargetCache.end()) {
        Result = At->second;
        break;
      }

      auto *Accesses = MSSA.getBlockAccesses(Node->getBlock());
      if (Accesses) {
        auto Iter = find_if(reverse(*Accesses), [](const MemoryAccess &MA) {
          return !isa<MemoryUse>(MA);
        });
        if (Iter != Accesses->rend()) {
          Result = const_cast<MemoryAccess *>(&*Iter);
          break;
        }
      }

      ToCache.push_back(Node->getBlock());
    }

    for (const BasicBlock *BB : ToCache)
      WalkTargetCache.insert({BB, Result});
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both.
    MemoryAccess *Result;
    bool IsKnownClobber;
    bool FromCache;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult walkToPhiOrClobber(DefPath &Desc,
                                       MemoryAccess *StopAt = nullptr) {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt)
        return {Current, false, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current))
        if (MSSA.isLiveOnEntryDef(MD) ||
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
          return {MD, true, false};

      // Cache checks must be done last, because if Current is a clobber, the
      // cache will contain the clobber for Current.
      if (MemoryAccess *MA = lookupCache(Current, Desc.Loc))
        return {MA, true, true};
    }

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, false};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
                                 upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere || Res.FromCache);
        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!Res.FromCache || !MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking.
        NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() : W(nullptr), N(None) {}
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W;
    Optional<ListIndex> N;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (1) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      MemoryAccess *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do basically nothing with
      // blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {
        // Cache our work on the blocking node, since we know that's correct.
        cacheDefPath(Paths[Blocker->LastNode], Blocker->Clobber);

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);
        CurNode.Blocker = Blocker->Clobber;

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (MemoryAccess *MA : def_chain(Target))
            DefChainEnd = MA;

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  /// Caches everything in an OptznResult.
  void cacheOptResult(const OptznResult &R) {
    if (R.OtherClobbers.empty()) {
      // If we're not going to be caching OtherClobbers, don't bother with
      // marking visited/etc.
      for (const DefPath &N : const_def_path(R.PrimaryClobber.LastNode))
        cacheDefPath(N, R.PrimaryClobber.Clobber);
      return;
    }

    // PrimaryClobber is our answer. If we can cache anything back, we need to
    // stop caching when we visit PrimaryClobber.
    SmallBitVector Visited(Paths.size());
    for (const DefPath &N : const_def_path(R.PrimaryClobber.LastNode)) {
      Visited[defPathIndex(N)] = true;
      cacheDefPath(N, R.PrimaryClobber.Clobber);
    }

    for (const TerminatedPath &P : R.OtherClobbers) {
      for (const DefPath &N : const_def_path(P.LastNode)) {
        ListIndex NIndex = defPathIndex(N);
        if (Visited[NIndex])
          break;
        Visited[NIndex] = true;
        cacheDefPath(N, P.Clobber);
      }
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT,
                WalkerCache &WC)
      : MSSA(MSSA), AA(AA), DT(DT), WC(WC), UseCache(true) {}

  void reset() { WalkTargetCache.clear(); }

  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
                            bool UseWalkerCache = true) {
    setUseCache(UseWalkerCache);
    Query = &Q;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      cacheDefPath(FirstDesc, WalkResult.Result);
      Result = WalkResult.Result;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      cacheOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }

  void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
};

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}
  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};
} // anonymous namespace

namespace llvm {
/// \brief A MemorySSAWalker that does AA walks and caching of lookups to
/// disambiguate accesses.
///
/// FIXME: The current implementation of this can take quadratic space in rare
/// cases. This can be fixed, but it is something to note until it is fixed.
///
/// In order to trigger this behavior, you need to store to N distinct locations
/// (that AA can prove don't alias), perform M stores to other memory
/// locations that AA can prove don't alias any of the initial N locations, and
/// then load from all of the N locations. In this case, we insert M cache
/// entries for each of the N loads.
///
/// For example:
///   define i32 @foo() {
///     %a = alloca i32, align 4
///     %b = alloca i32, align 4
///     store i32 0, i32* %a, align 4
///     store i32 0, i32* %b, align 4
///
///     ; Insert M stores to other memory that doesn't alias %a or %b here
///
///     %c = load i32, i32* %a, align 4 ; Caches M entries in
///                                     ; CachedUpwardsClobberingAccess for the
///                                     ; MemoryLocation %a
///     %d = load i32, i32* %b, align 4 ; Caches M entries in
///                                     ; CachedUpwardsClobberingAccess for the
///                                     ; MemoryLocation %b
///
///     ; For completeness' sake, loading %a or %b again would not cache *another*
///     ; M entries.
///     %r = add i32 %c, %d
///     ret i32 %r
///   }
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  WalkerCache Cache;
  ClobberWalker Walker;
  bool AutoResetWalker;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
  void verifyRemoved(MemoryAccess *);

public:
  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
  ~CachingWalker() override;

  using MemorySSAWalker::getClobberingMemoryAccess;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          MemoryLocation &) override;
  void invalidateInfo(MemoryAccess *) override;

  /// Whether we call resetClobberWalker() after each time we *actually* walk to
  /// answer a clobber query.
  void setAutoResetWalker(bool AutoReset) { AutoResetWalker = AutoReset; }

  /// Drop the walker's persistent data structures. At the moment, this means
  /// "drop the walker's cache of BasicBlocks ->
  /// earliest-MemoryAccess-we-can-optimize-to". This is necessary if we're
  /// going to have DT updates, if we remove MemoryAccesses, etc.
  void resetClobberWalker() { Walker.reset(); }

  void verify(const MemorySSA *MSSA) override {
    MemorySSAWalker::verify(MSSA);
    Walker.verify(MSSA);
  }
};

/// \brief Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB,
                                     MemoryAccess *IncomingVal) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      switch (L.getValueID()) {
      case Value::MemoryUseVal:
        cast<MemoryUse>(&L)->setDefiningAccess(IncomingVal);
        break;
      case Value::MemoryDefVal:
        // We can't legally optimize defs, because we only allow single
        // memory phis/uses on operations, and if we optimize these, we can
        // end up with multiple reaching defs. Uses do not have this
        // problem, since they do not produce a value
        cast<MemoryDef>(&L)->setDefiningAccess(IncomingVal);
        IncomingVal = &L;
        break;
      case Value::MemoryPhiVal:
        IncomingVal = &L;
        break;
      }
    }
  }

  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(IncomingVal, BB);
  }

  return IncomingVal;
}

/// \brief This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSet<BasicBlock *, 16> &Visited) {
  SmallVector<RenamePassData, 32> WorkStack;
  IncomingVal = renameBlock(Root->getBlock(), IncomingVal);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});
  Visited.insert(Root->getBlock());

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      Visited.insert(BB);
      IncomingVal = renameBlock(BB, IncomingVal);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// \brief Compute dominator levels, used by the phi insertion algorithm above.
void MemorySSA::computeDomLevels(DenseMap<DomTreeNode *, unsigned> &DomLevels) {
  for (auto DFI = df_begin(DT->getRootNode()), DFE = df_end(DT->getRootNode());
       DFI != DFE; ++DFI)
    DomLevels[*DFI] = DFI.getPathLength() - 1;
}
| 1194 | |
George Burgess IV | a362b09 | 2016-07-06 00:28:43 +0000 | [diff] [blame] | 1195 | /// \brief This handles unreachable block accesses by deleting phi nodes in |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1196 | /// unreachable blocks, and marking all other unreachable MemoryAccesses as
| 1197 | /// being uses of the live on entry definition. |
| 1198 | void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) { |
| 1199 | assert(!DT->isReachableFromEntry(BB) && |
| 1200 | "Reachable block found while handling unreachable blocks"); |
| 1201 | |
Daniel Berlin | fc7e651 | 2016-07-06 05:32:05 +0000 | [diff] [blame] | 1202 | // Make sure phi nodes in our reachable successors end up with a |
| 1203 | // LiveOnEntryDef for our incoming edge, even though our block is forward |
| 1204 | // unreachable. We could just disconnect these blocks from the CFG fully, |
| 1205 | // but we do not right now. |
| 1206 | for (const BasicBlock *S : successors(BB)) { |
| 1207 | if (!DT->isReachableFromEntry(S)) |
| 1208 | continue; |
| 1209 | auto It = PerBlockAccesses.find(S); |
| 1210 | // Skip successors that do not begin with a MemoryPhi; only phis need an
| 1210 | // incoming value for this edge.
| 1211 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) |
| 1212 | continue; |
| 1213 | AccessList *Accesses = It->second.get(); |
| 1214 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); |
| 1215 | Phi->addIncoming(LiveOnEntryDef.get(), BB); |
| 1216 | } |
| 1217 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1218 | auto It = PerBlockAccesses.find(BB); |
| 1219 | if (It == PerBlockAccesses.end()) |
| 1220 | return; |
| 1221 | |
| 1222 | auto &Accesses = It->second; |
| 1223 | for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) { |
| 1224 | auto Next = std::next(AI); |
| 1225 | // If we have a phi, just remove it. We are going to replace all |
| 1226 | // users with live on entry. |
| 1227 | if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI)) |
| 1228 | UseOrDef->setDefiningAccess(LiveOnEntryDef.get()); |
| 1229 | else |
| 1230 | Accesses->erase(AI); |
| 1231 | AI = Next; |
| 1232 | } |
| 1233 | } |
| 1234 | |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 1235 | MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT) |
| 1236 | : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr), |
| 1237 | NextID(0) { |
Daniel Berlin | 16ed57c | 2016-06-27 18:22:27 +0000 | [diff] [blame] | 1238 | buildMemorySSA(); |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 1239 | } |
| 1240 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1241 | MemorySSA::~MemorySSA() { |
| 1242 | // Drop all our references |
| 1243 | for (const auto &Pair : PerBlockAccesses) |
| 1244 | for (MemoryAccess &MA : *Pair.second) |
| 1245 | MA.dropAllReferences(); |
| 1246 | } |
| 1247 | |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1248 | MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1249 | auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr)); |
| 1250 | |
| 1251 | if (Res.second) |
Daniel Berlin | ada263d | 2016-06-20 20:21:33 +0000 | [diff] [blame] | 1252 | Res.first->second = make_unique<AccessList>(); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1253 | return Res.first->second.get(); |
| 1254 | } |
| 1255 | |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1256 | /// This class is a batch walker of all MemoryUses in the program, and points
| 1257 | /// their defining access at the thing that actually clobbers them. Because it |
| 1258 | /// is a batch walker that touches everything, it does not operate like the |
| 1259 | /// other walkers. This walker is basically performing a top-down SSA renaming |
| 1260 | /// pass, where the version stack is used as the cache. This enables it to be |
| 1261 | /// significantly more time and memory efficient than using the regular walker, |
| 1262 | /// which walks bottom-up.
| 1263 | class MemorySSA::OptimizeUses { |
| 1264 | public: |
| 1265 | OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA, |
| 1266 | DominatorTree *DT) |
| 1267 | : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
| 1268 |   // The Walker member is initialized above; the caller passes MSSA's walker.
| 1269 | }
| 1270 | |
| 1271 | void optimizeUses(); |
| 1272 | |
| 1273 | private: |
| 1274 | /// This represents where a given memorylocation is in the stack. |
| 1275 | struct MemlocStackInfo { |
| 1276 | // These fields essentially track versions of the stack: whenever the stack
| 1277 | // changes due to pushes or pops, these versions increase.
| 1278 | unsigned long StackEpoch; |
| 1279 | unsigned long PopEpoch; |
| 1280 | // This is the lower bound of places on the stack to check. It is equal to |
| 1281 | // the place the last stack walk ended. |
| 1282 | // Note: Correctness depends on this being initialized to 0, which DenseMap
| 1283 | // does by default.
| 1284 | unsigned long LowerBound; |
Daniel Berlin | 4b4c722 | 2016-08-08 04:44:53 +0000 | [diff] [blame] | 1285 | const BasicBlock *LowerBoundBlock; |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1286 | // This is where the last walk for this memory location ended. |
| 1287 | unsigned long LastKill; |
| 1288 | bool LastKillValid; |
| 1289 | }; |
| 1290 | void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &, |
| 1291 | SmallVectorImpl<MemoryAccess *> &, |
| 1292 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &); |
| 1293 | MemorySSA *MSSA; |
| 1294 | MemorySSAWalker *Walker; |
| 1295 | AliasAnalysis *AA; |
| 1296 | DominatorTree *DT; |
| 1297 | }; |
| 1298 | |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1299 | /// Optimize the uses in a given block. This is basically the SSA renaming
| 1300 | /// algorithm, with one caveat: We are able to use a single stack for all |
| 1301 | /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is |
| 1302 | /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just |
| 1303 | /// going to be some position in that stack of possible ones. |
| 1304 | /// |
| 1305 | /// For each MemoryLocation, we track the stack position it needs to start
| 1306 | /// checking from and where its last walk ended, because we only want to check
| 1307 | /// the things that have changed since last time. The same MemoryLocation
| 1308 | /// should get clobbered by the same store (getModRefInfo does not use
| 1309 | /// invariance or similar properties; if it starts to, we can extend
| 1310 | /// MemoryLocOrCall to include the relevant data).
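| | ///
| | /// Purely as an illustration (the IR and aliasing are assumed, not drawn from
| | /// a real test): given a block whose accesses are
| | ///   1 = MemoryDef(liveOnEntry)   ; store to %a
| | ///   2 = MemoryDef(1)             ; store to %b
| | ///   MemoryUse(2)                 ; load of %a
| | /// the version stack at the load is [liveOnEntry, 1, 2]. Walking down from the
| | /// top, def 2 does not clobber the load (assuming %a and %b do not alias) but
| | /// def 1 does, so the use is re-pointed from 2 to 1, LastKill records that
| | /// position, and LowerBound moves up to the current top of the stack.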
| 1311 | void MemorySSA::OptimizeUses::optimizeUsesInBlock( |
| 1312 | const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch, |
| 1313 | SmallVectorImpl<MemoryAccess *> &VersionStack, |
| 1314 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) { |
| 1315 | |
| 1316 | // If no accesses, nothing to do.
| 1317 | MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB); |
| 1318 | if (Accesses == nullptr) |
| 1319 | return; |
| 1320 | |
| 1321 | // Pop everything that doesn't dominate the current block off the stack, |
| 1322 | // and increment the PopEpoch to account for this.
| 1323 | while (!VersionStack.empty()) { |
| 1324 | BasicBlock *BackBlock = VersionStack.back()->getBlock(); |
| 1325 | if (DT->dominates(BackBlock, BB)) |
| 1326 | break; |
| 1327 | while (VersionStack.back()->getBlock() == BackBlock) |
| 1328 | VersionStack.pop_back(); |
| 1329 | ++PopEpoch; |
| 1330 | } |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1331 | for (MemoryAccess &MA : *Accesses) { |
| 1332 | auto *MU = dyn_cast<MemoryUse>(&MA); |
| 1333 | if (!MU) { |
| 1334 | VersionStack.push_back(&MA); |
| 1335 | ++StackEpoch; |
| 1336 | continue; |
| 1337 | } |
| 1338 | |
George Burgess IV | 024f3d2 | 2016-08-03 19:57:02 +0000 | [diff] [blame] | 1339 | if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) { |
| 1340 | MU->setDefiningAccess(MSSA->getLiveOnEntryDef()); |
| 1341 | continue; |
| 1342 | } |
| 1343 | |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1344 | MemoryLocOrCall UseMLOC(MU); |
| 1345 | auto &LocInfo = LocStackInfo[UseMLOC]; |
Daniel Berlin | 26fcea9 | 2016-08-02 20:02:21 +0000 | [diff] [blame] | 1346 | // If the pop epoch changed, it means we've removed stuff from the top of the
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1347 | // stack due to changing blocks. We may have to reset the lower bound or
| 1348 | // last kill info. |
| 1349 | if (LocInfo.PopEpoch != PopEpoch) { |
| 1350 | LocInfo.PopEpoch = PopEpoch; |
| 1351 | LocInfo.StackEpoch = StackEpoch; |
Daniel Berlin | 4b4c722 | 2016-08-08 04:44:53 +0000 | [diff] [blame] | 1352 | // If the lower bound was in something that no longer dominates us, we |
| 1353 | // have to reset it. |
| 1354 | // We can't simply track stack size, because the stack may have had |
| 1355 | // pushes/pops in the meantime. |
| 1356 | // XXX: This is non-optimal, but is only slower in cases with heavily
| 1357 | // branching dominator trees. Getting the optimal number of queries would
| 1358 | // require making LowerBound and LastKill per-location stacks, and popping
| 1359 | // them until the top of that stack dominates us. This does not seem worth
| 1360 | // it ATM. A much cheaper optimization would be to always explore the
| 1361 | // deepest branch of the dominator tree first. That would guarantee the reset
| 1362 | // happens on the smallest set of blocks.
| 1363 | if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB && |
| 1364 | !DT->dominates(LocInfo.LowerBoundBlock, BB)){ |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1365 | // Reset the lower bound of things to check. |
| 1366 | // TODO: Some day we should be able to reset to last kill, rather than |
| 1367 | // 0. |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1368 | LocInfo.LowerBound = 0; |
Daniel Berlin | 4b4c722 | 2016-08-08 04:44:53 +0000 | [diff] [blame] | 1369 | LocInfo.LowerBoundBlock = VersionStack[0]->getBlock(); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1370 | LocInfo.LastKillValid = false; |
| 1371 | } |
| 1372 | } else if (LocInfo.StackEpoch != StackEpoch) { |
| 1373 | // If all that has changed is the StackEpoch, we only have to check the |
| 1374 | // new things on the stack, because we've checked everything before. In |
| 1375 | // this case, the lower bound of things to check remains the same. |
| 1376 | LocInfo.PopEpoch = PopEpoch; |
| 1377 | LocInfo.StackEpoch = StackEpoch; |
| 1378 | } |
| 1379 | if (!LocInfo.LastKillValid) { |
| 1380 | LocInfo.LastKill = VersionStack.size() - 1; |
| 1381 | LocInfo.LastKillValid = true; |
| 1382 | } |
| 1383 | |
| 1384 | // At this point, we should have corrected last kill and LowerBound to be |
| 1385 | // in bounds. |
| 1386 | assert(LocInfo.LowerBound < VersionStack.size() && |
| 1387 | "Lower bound out of range"); |
| 1388 | assert(LocInfo.LastKill < VersionStack.size() && |
| 1389 | "Last kill info out of range"); |
| 1390 | // In any case, the new upper bound is the top of the stack. |
| 1391 | unsigned long UpperBound = VersionStack.size() - 1; |
| 1392 | |
| 1393 | if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) { |
Daniel Berlin | 26fcea9 | 2016-08-02 20:02:21 +0000 | [diff] [blame] | 1394 | DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " (" |
| 1395 | << *(MU->getMemoryInst()) << ")" |
| 1396 | << " because there are " << UpperBound - LocInfo.LowerBound |
| 1397 | << " stores to disambiguate\n"); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1398 | // Because we did not walk, LastKill is no longer valid, as this may |
| 1399 | // have been a kill. |
| 1400 | LocInfo.LastKillValid = false; |
| 1401 | continue; |
| 1402 | } |
| 1403 | bool FoundClobberResult = false; |
| 1404 | while (UpperBound > LocInfo.LowerBound) { |
| 1405 | if (isa<MemoryPhi>(VersionStack[UpperBound])) { |
| 1406 | // For phis, use the walker, see where we ended up, go there |
| 1407 | Instruction *UseInst = MU->getMemoryInst(); |
| 1408 | MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst); |
| 1409 | // We are guaranteed to find it or something is wrong |
| 1410 | while (VersionStack[UpperBound] != Result) { |
| 1411 | assert(UpperBound != 0); |
| 1412 | --UpperBound; |
| 1413 | } |
| 1414 | FoundClobberResult = true; |
| 1415 | break; |
| 1416 | } |
| 1417 | |
| 1418 | MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]); |
Daniel Berlin | df10119 | 2016-08-03 00:01:46 +0000 | [diff] [blame] | 1419 | // If the lifetime of the pointer ends at this instruction, it's live on |
| 1420 | // entry. |
| 1421 | if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) { |
| 1422 | // Reset UpperBound to liveOnEntryDef's place in the stack |
| 1423 | UpperBound = 0; |
| 1424 | FoundClobberResult = true; |
| 1425 | break; |
| 1426 | } |
Daniel Berlin | dff31de | 2016-08-02 21:57:52 +0000 | [diff] [blame] | 1427 | if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) { |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1428 | FoundClobberResult = true; |
| 1429 | break; |
| 1430 | } |
| 1431 | --UpperBound; |
| 1432 | } |
| 1433 | // At the end of this loop, UpperBound is either a clobber or the lower bound.
| 1434 | // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
| 1435 | if (FoundClobberResult || UpperBound < LocInfo.LastKill) { |
| 1436 | MU->setDefiningAccess(VersionStack[UpperBound]); |
| 1437 | // Our last kill is now the access we stopped at.
| 1438 | LocInfo.LastKill = UpperBound; |
| 1439 | } else { |
| 1440 | // Otherwise, we checked all the new ones, and now we know we can get to |
| 1441 | // LastKill. |
| 1442 | MU->setDefiningAccess(VersionStack[LocInfo.LastKill]); |
| 1443 | } |
| 1444 | LocInfo.LowerBound = VersionStack.size() - 1; |
Daniel Berlin | 4b4c722 | 2016-08-08 04:44:53 +0000 | [diff] [blame] | 1445 | LocInfo.LowerBoundBlock = BB; |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1446 | } |
| 1447 | } |
| 1448 | |
| 1449 | /// Optimize uses to point to their actual clobbering definitions. |
| 1450 | void MemorySSA::OptimizeUses::optimizeUses() { |
| 1451 | |
| 1452 | // We perform a non-recursive top-down dominator tree walk |
| 1453 | struct StackInfo { |
| 1454 | const DomTreeNode *Node; |
| 1455 | DomTreeNode::const_iterator Iter; |
| 1456 | }; |
| 1457 | |
| 1458 | SmallVector<MemoryAccess *, 16> VersionStack; |
| 1459 | SmallVector<StackInfo, 16> DomTreeWorklist; |
| 1460 | DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo; |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1461 | VersionStack.push_back(MSSA->getLiveOnEntryDef()); |
| 1462 | |
| 1463 | unsigned long StackEpoch = 1; |
| 1464 | unsigned long PopEpoch = 1; |
Daniel Berlin | 7ac3d74 | 2016-08-05 22:09:14 +0000 | [diff] [blame] | 1465 | for (const auto *DomNode : depth_first(DT->getRootNode())) |
| 1466 | optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack, |
| 1467 | LocStackInfo); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1468 | } |
| 1469 | |
Daniel Berlin | 16ed57c | 2016-06-27 18:22:27 +0000 | [diff] [blame] | 1470 | void MemorySSA::buildMemorySSA() { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1471 | // We create an access to represent "live on entry", for things like |
| 1472 | // arguments or users of globals, where the memory they use is defined before |
| 1473 | // the beginning of the function. We do not actually insert it into the IR. |
| 1474 | // We do not define a live on exit for the immediate uses, and thus our |
| 1475 | // semantics do *not* imply that something with no immediate uses can simply |
| 1476 | // be removed. |
| 1477 | BasicBlock &StartingPoint = F.getEntryBlock(); |
| 1478 | LiveOnEntryDef = make_unique<MemoryDef>(F.getContext(), nullptr, nullptr, |
| 1479 | &StartingPoint, NextID++); |
| 1480 | |
| 1481 | // We maintain lists of memory accesses per-block, trading memory for time. We |
| 1482 | // could just look up the memory access for every possible instruction in the |
| 1483 | // stream. |
| 1484 | SmallPtrSet<BasicBlock *, 32> DefiningBlocks; |
Daniel Berlin | 1b51a29 | 2016-02-07 01:52:19 +0000 | [diff] [blame] | 1485 | SmallPtrSet<BasicBlock *, 32> DefUseBlocks; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1486 | // Go through each block, figure out where defs occur, and chain together all |
| 1487 | // the accesses. |
| 1488 | for (BasicBlock &B : F) { |
Daniel Berlin | 7898ca6 | 2016-02-07 01:52:15 +0000 | [diff] [blame] | 1489 | bool InsertIntoDef = false; |
Daniel Berlin | ada263d | 2016-06-20 20:21:33 +0000 | [diff] [blame] | 1490 | AccessList *Accesses = nullptr; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1491 | for (Instruction &I : B) { |
Peter Collingbourne | ffecb14 | 2016-05-26 01:19:17 +0000 | [diff] [blame] | 1492 | MemoryUseOrDef *MUD = createNewAccess(&I); |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1493 | if (!MUD) |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1494 | continue; |
George Burgess IV | 3887a41 | 2016-03-21 21:25:39 +0000 | [diff] [blame] | 1495 | InsertIntoDef |= isa<MemoryDef>(MUD); |
Daniel Berlin | 1b51a29 | 2016-02-07 01:52:19 +0000 | [diff] [blame] | 1496 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1497 | if (!Accesses) |
| 1498 | Accesses = getOrCreateAccessList(&B); |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1499 | Accesses->push_back(MUD); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1500 | } |
Daniel Berlin | 7898ca6 | 2016-02-07 01:52:15 +0000 | [diff] [blame] | 1501 | if (InsertIntoDef) |
| 1502 | DefiningBlocks.insert(&B); |
George Burgess IV | 3887a41 | 2016-03-21 21:25:39 +0000 | [diff] [blame] | 1503 | if (Accesses) |
Daniel Berlin | 1b51a29 | 2016-02-07 01:52:19 +0000 | [diff] [blame] | 1504 | DefUseBlocks.insert(&B); |
| 1505 | } |
| 1506 | |
| 1507 | // Compute live-in. |
| 1508 | // Live-in is normally defined as "all the blocks on the path from each def to
| 1509 | // each of its uses".
| 1510 | // MemoryDefs are implicit uses of previous state, so they are also uses.
| 1511 | // This means we don't really have def-only instructions. The only
| 1512 | // MemoryDefs that are not really uses are those that are defs of the
| 1513 | // LiveOnEntry variable (because LiveOnEntry can reach anywhere, and every
| 1514 | // def is a must-kill of LiveOnEntry).
| 1515 | // In theory, you could precisely compute live-in by using alias-analysis to |
| 1516 | // disambiguate defs and uses to see which really pair up with which. |
| 1517 | // In practice, this would be really expensive and difficult. So we simply |
| 1518 | // assume all defs are also uses that need to be kept live. |
| 1519 | // Because of this, the end result of this live-in computation will be "the |
| 1520 | // entire set of basic blocks that reach any use". |
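| | //
| | // In pseudocode: start from DefUseBlocks and repeatedly add the predecessors
| | // of every block already in the set until a fixed point is reached; that is
| | // exactly what the worklist loop below does.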
| 1521 | |
| 1522 | SmallPtrSet<BasicBlock *, 32> LiveInBlocks; |
| 1523 | SmallVector<BasicBlock *, 64> LiveInBlockWorklist(DefUseBlocks.begin(), |
| 1524 | DefUseBlocks.end()); |
| 1525 | // Now that we have a set of blocks where a value is live-in, recursively add
| 1526 | // predecessors until we find the full region in which the value is live.
| 1527 | while (!LiveInBlockWorklist.empty()) { |
| 1528 | BasicBlock *BB = LiveInBlockWorklist.pop_back_val(); |
| 1529 | |
| 1530 | // The block really is live in here, insert it into the set. If already in |
| 1531 | // the set, then it has already been processed. |
| 1532 | if (!LiveInBlocks.insert(BB).second) |
| 1533 | continue; |
| 1534 | |
| 1535 | // Since the value is live into BB, it is either defined in a predecessor or
| 1536 | // live into it too.
| 1537 | LiveInBlockWorklist.append(pred_begin(BB), pred_end(BB)); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1538 | } |
| 1539 | |
| 1540 | // Determine where our MemoryPhis should go
Daniel Berlin | 77fa84e | 2016-04-19 06:13:28 +0000 | [diff] [blame] | 1541 | ForwardIDFCalculator IDFs(*DT); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1542 | IDFs.setDefiningBlocks(DefiningBlocks); |
Daniel Berlin | 1b51a29 | 2016-02-07 01:52:19 +0000 | [diff] [blame] | 1543 | IDFs.setLiveInBlocks(LiveInBlocks); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1544 | SmallVector<BasicBlock *, 32> IDFBlocks; |
| 1545 | IDFs.calculate(IDFBlocks); |
| 1546 | |
| 1547 | // Now place MemoryPhi nodes. |
| 1548 | for (auto &BB : IDFBlocks) { |
| 1549 | // Insert phi node |
Daniel Berlin | ada263d | 2016-06-20 20:21:33 +0000 | [diff] [blame] | 1550 | AccessList *Accesses = getOrCreateAccessList(BB); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1551 | MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++); |
Daniel Berlin | 5130cc8 | 2016-07-31 21:08:20 +0000 | [diff] [blame] | 1552 | ValueToMemoryAccess[BB] = Phi; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1553 | // Phis are always placed at the front of the block.
| 1554 | Accesses->push_front(Phi); |
| 1555 | } |
| 1556 | |
| 1557 | // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get |
| 1558 | // filled in with all blocks. |
| 1559 | SmallPtrSet<BasicBlock *, 16> Visited; |
| 1560 | renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited); |
| 1561 | |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 1562 | CachingWalker *Walker = getWalkerImpl(); |
| 1563 | |
| 1564 | // We're doing a batch of updates; don't drop useful caches between them. |
| 1565 | Walker->setAutoResetWalker(false); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1566 | OptimizeUses(this, Walker, AA, DT).optimizeUses(); |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 1567 | Walker->setAutoResetWalker(true); |
| 1568 | Walker->resetClobberWalker(); |
| 1569 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1570 | // Mark the uses in unreachable blocks as live on entry, so that they go |
| 1571 | // somewhere. |
| 1572 | for (auto &BB : F) |
| 1573 | if (!Visited.count(&BB)) |
| 1574 | markUnreachableAsLiveOnEntry(&BB); |
Daniel Berlin | 16ed57c | 2016-06-27 18:22:27 +0000 | [diff] [blame] | 1575 | } |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1576 | |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 1577 | MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } |
| 1578 | |
| 1579 | MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() { |
Daniel Berlin | 16ed57c | 2016-06-27 18:22:27 +0000 | [diff] [blame] | 1580 | if (Walker) |
| 1581 | return Walker.get(); |
| 1582 | |
| 1583 | Walker = make_unique<CachingWalker>(this, AA, DT); |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 1584 | return Walker.get(); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1585 | } |
| 1586 | |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1587 | MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) { |
| 1588 | assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB"); |
| 1589 | AccessList *Accesses = getOrCreateAccessList(BB); |
| 1590 | MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++); |
Daniel Berlin | 5130cc8 | 2016-07-31 21:08:20 +0000 | [diff] [blame] | 1591 | ValueToMemoryAccess[BB] = Phi; |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1592 | // Phis are always placed at the front of the block.
| 1593 | Accesses->push_front(Phi); |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1594 | BlockNumberingValid.erase(BB); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1595 | return Phi; |
| 1596 | } |
| 1597 | |
| 1598 | MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I, |
| 1599 | MemoryAccess *Definition) { |
| 1600 | assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI"); |
| 1601 | MemoryUseOrDef *NewAccess = createNewAccess(I); |
| 1602 | assert( |
| 1603 | NewAccess != nullptr && |
| 1604 | "Tried to create a memory access for a non-memory touching instruction"); |
| 1605 | NewAccess->setDefiningAccess(Definition); |
| 1606 | return NewAccess; |
| 1607 | } |
| 1608 | |
| 1609 | MemoryAccess *MemorySSA::createMemoryAccessInBB(Instruction *I, |
| 1610 | MemoryAccess *Definition, |
| 1611 | const BasicBlock *BB, |
| 1612 | InsertionPlace Point) { |
| 1613 | MemoryUseOrDef *NewAccess = createDefinedAccess(I, Definition); |
| 1614 | auto *Accesses = getOrCreateAccessList(BB); |
| 1615 | if (Point == Beginning) { |
| 1616 | // It goes after any phi nodes |
David Majnemer | 4253126 | 2016-08-12 03:55:06 +0000 | [diff] [blame^] | 1617 | auto AI = find_if( |
| 1618 | *Accesses, [](const MemoryAccess &MA) { return !isa<MemoryPhi>(MA); }); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1619 | |
| 1620 | Accesses->insert(AI, NewAccess); |
| 1621 | } else { |
| 1622 | Accesses->push_back(NewAccess); |
| 1623 | } |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1624 | BlockNumberingValid.erase(BB); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1625 | return NewAccess; |
| 1626 | } |
| 1627 | MemoryAccess *MemorySSA::createMemoryAccessBefore(Instruction *I, |
| 1628 | MemoryAccess *Definition, |
| 1629 | MemoryAccess *InsertPt) { |
| 1630 | assert(I->getParent() == InsertPt->getBlock() && |
| 1631 | "New and old access must be in the same block"); |
| 1632 | MemoryUseOrDef *NewAccess = createDefinedAccess(I, Definition); |
| 1633 | auto *Accesses = getOrCreateAccessList(InsertPt->getBlock()); |
| 1634 | Accesses->insert(AccessList::iterator(InsertPt), NewAccess); |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1635 | BlockNumberingValid.erase(InsertPt->getBlock()); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1636 | return NewAccess; |
| 1637 | } |
| 1638 | |
| 1639 | MemoryAccess *MemorySSA::createMemoryAccessAfter(Instruction *I, |
| 1640 | MemoryAccess *Definition, |
| 1641 | MemoryAccess *InsertPt) { |
| 1642 | assert(I->getParent() == InsertPt->getBlock() && |
| 1643 | "New and old access must be in the same block"); |
| 1644 | MemoryUseOrDef *NewAccess = createDefinedAccess(I, Definition); |
| 1645 | auto *Accesses = getOrCreateAccessList(InsertPt->getBlock()); |
| 1646 | Accesses->insertAfter(AccessList::iterator(InsertPt), NewAccess); |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1647 | BlockNumberingValid.erase(InsertPt->getBlock()); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1648 | return NewAccess; |
| 1649 | } |
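| |
| | // Illustrative only: a typical way an updating pass might combine the hooks
| | // above when replacing one memory instruction with another (OldI, NewI and
| | // the surrounding pass are hypothetical):
| | //   MemoryAccess *OldMA = MSSA->getMemoryAccess(OldI);
| | //   MemoryAccess *Def = cast<MemoryUseOrDef>(OldMA)->getDefiningAccess();
| | //   MemoryAccess *NewMA = MSSA->createMemoryAccessBefore(NewI, Def, OldMA);
| | //   OldMA->replaceAllUsesWith(NewMA);
| | //   MSSA->removeMemoryAccess(OldMA);
| | // Keeping the IR and MemorySSA in sync remains the caller's responsibility.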
| 1650 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1651 | /// \brief Helper function to create new memory accesses |
Peter Collingbourne | ffecb14 | 2016-05-26 01:19:17 +0000 | [diff] [blame] | 1652 | MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) { |
Peter Collingbourne | b9aa1f4 | 2016-05-26 04:58:46 +0000 | [diff] [blame] | 1653 | // The assume intrinsic has a control dependency which we model by claiming |
| 1654 | // that it writes arbitrarily. Ignore that fake memory dependency here. |
| 1655 | // FIXME: Replace this special casing with a more accurate modelling of |
| 1656 | // assume's control dependency. |
| 1657 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) |
| 1658 | if (II->getIntrinsicID() == Intrinsic::assume) |
| 1659 | return nullptr; |
| 1660 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1661 | // Find out what effect this instruction has on memory.
| 1662 | ModRefInfo ModRef = AA->getModRefInfo(I); |
| 1663 | bool Def = bool(ModRef & MRI_Mod); |
| 1664 | bool Use = bool(ModRef & MRI_Ref); |
| 1665 | |
| 1666 | // It's possible for an instruction to not modify memory at all. We ignore
| 1667 | // such instructions during construction.
Peter Collingbourne | ffecb14 | 2016-05-26 01:19:17 +0000 | [diff] [blame] | 1668 | if (!Def && !Use) |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1669 | return nullptr; |
| 1670 | |
| 1671 | assert((Def || Use) && |
| 1672 | "Trying to create a memory access with a non-memory instruction"); |
| 1673 | |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1674 | MemoryUseOrDef *MUD; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1675 | if (Def) |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1676 | MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1677 | else |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1678 | MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent()); |
Daniel Berlin | 5130cc8 | 2016-07-31 21:08:20 +0000 | [diff] [blame] | 1679 | ValueToMemoryAccess[I] = MUD; |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1680 | return MUD; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1681 | } |
| 1682 | |
| 1683 | MemoryAccess *MemorySSA::findDominatingDef(BasicBlock *UseBlock, |
| 1684 | enum InsertionPlace Where) { |
| 1685 | // Handle the initial case |
| 1686 | if (Where == Beginning) |
| 1687 | // The only thing that could define us at the beginning is a phi node |
| 1688 | if (MemoryPhi *Phi = getMemoryAccess(UseBlock)) |
| 1689 | return Phi; |
| 1690 | |
| 1691 | DomTreeNode *CurrNode = DT->getNode(UseBlock); |
| 1692 | // Need to be defined by our dominator |
| 1693 | if (Where == Beginning) |
| 1694 | CurrNode = CurrNode->getIDom(); |
| 1695 | Where = End; |
| 1696 | while (CurrNode) { |
| 1697 | auto It = PerBlockAccesses.find(CurrNode->getBlock()); |
| 1698 | if (It != PerBlockAccesses.end()) { |
| 1699 | auto &Accesses = It->second; |
David Majnemer | d770877 | 2016-06-24 04:05:21 +0000 | [diff] [blame] | 1700 | for (MemoryAccess &RA : reverse(*Accesses)) { |
| 1701 | if (isa<MemoryDef>(RA) || isa<MemoryPhi>(RA)) |
| 1702 | return &RA; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1703 | } |
| 1704 | } |
| 1705 | CurrNode = CurrNode->getIDom(); |
| 1706 | } |
| 1707 | return LiveOnEntryDef.get(); |
| 1708 | } |
| 1709 | |
| 1710 | /// \brief Returns true if \p Replacer dominates \p Replacee . |
| 1711 | bool MemorySSA::dominatesUse(const MemoryAccess *Replacer, |
| 1712 | const MemoryAccess *Replacee) const { |
| 1713 | if (isa<MemoryUseOrDef>(Replacee)) |
| 1714 | return DT->dominates(Replacer->getBlock(), Replacee->getBlock()); |
| 1715 | const auto *MP = cast<MemoryPhi>(Replacee); |
| 1716 | // For a phi node, the use occurs in the predecessor block of the phi node. |
| 1717 | // Since we may occur multiple times in the phi node, we have to check each |
| 1718 | // operand to ensure Replacer dominates each operand where Replacee occurs. |
| 1719 | for (const Use &Arg : MP->operands()) { |
George Burgess IV | b5a229f | 2016-02-02 23:15:26 +0000 | [diff] [blame] | 1720 | if (Arg.get() != Replacee && |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1721 | !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg))) |
| 1722 | return false; |
| 1723 | } |
| 1724 | return true; |
| 1725 | } |
| 1726 | |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 1727 | /// \brief If all arguments of a MemoryPHI are defined by the same incoming |
| 1728 | /// argument, return that argument. |
| 1729 | static MemoryAccess *onlySingleValue(MemoryPhi *MP) { |
| 1730 | MemoryAccess *MA = nullptr; |
| 1731 | |
| 1732 | for (auto &Arg : MP->operands()) { |
| 1733 | if (!MA) |
| 1734 | MA = cast<MemoryAccess>(Arg); |
| 1735 | else if (MA != Arg) |
| 1736 | return nullptr; |
| 1737 | } |
| 1738 | return MA; |
| 1739 | } |
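| | // (Illustration: a phi whose incoming values are all access 3 collapses to 3,
| | // while one with incoming values 3 and 4 has no single value and yields
| | // nullptr.)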
| 1740 | |
| 1741 | /// \brief Properly remove \p MA from all of MemorySSA's lookup tables. |
| 1742 | /// |
| 1743 | /// Because of the way the intrusive list and use lists work, it is important to |
| 1744 | /// do removal in the right order. |
| 1745 | void MemorySSA::removeFromLookups(MemoryAccess *MA) { |
| 1746 | assert(MA->use_empty() && |
| 1747 | "Trying to remove memory access that still has uses"); |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1748 | BlockNumbering.erase(MA); |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 1749 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) |
| 1750 | MUD->setDefiningAccess(nullptr); |
| 1751 | // Invalidate our walker's cache if necessary |
| 1752 | if (!isa<MemoryUse>(MA)) |
| 1753 | Walker->invalidateInfo(MA); |
| 1754 | // The call below to erase will destroy MA, so we can't change the order we |
| 1755 | // are doing things here |
| 1756 | Value *MemoryInst; |
| 1757 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) { |
| 1758 | MemoryInst = MUD->getMemoryInst(); |
| 1759 | } else { |
| 1760 | MemoryInst = MA->getBlock(); |
| 1761 | } |
Daniel Berlin | 5130cc8 | 2016-07-31 21:08:20 +0000 | [diff] [blame] | 1762 | auto VMA = ValueToMemoryAccess.find(MemoryInst); |
| 1763 | if (VMA->second == MA) |
| 1764 | ValueToMemoryAccess.erase(VMA); |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 1765 | |
George Burgess IV | e0e6e48 | 2016-03-02 02:35:04 +0000 | [diff] [blame] | 1766 | auto AccessIt = PerBlockAccesses.find(MA->getBlock()); |
Daniel Berlin | ada263d | 2016-06-20 20:21:33 +0000 | [diff] [blame] | 1767 | std::unique_ptr<AccessList> &Accesses = AccessIt->second; |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 1768 | Accesses->erase(MA); |
George Burgess IV | e0e6e48 | 2016-03-02 02:35:04 +0000 | [diff] [blame] | 1769 | if (Accesses->empty()) |
| 1770 | PerBlockAccesses.erase(AccessIt); |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 1771 | } |
| 1772 | |
| 1773 | void MemorySSA::removeMemoryAccess(MemoryAccess *MA) { |
| 1774 | assert(!isLiveOnEntryDef(MA) && "Trying to remove the live on entry def"); |
| 1775 | // We can only delete phi nodes if they have no uses, or we can replace all |
| 1776 | // uses with a single definition. |
| 1777 | MemoryAccess *NewDefTarget = nullptr; |
| 1778 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) { |
| 1779 | // Note that it is sufficient to know that all edges of the phi node have |
| 1780 | // the same argument. If they do, by the definition of dominance frontiers |
| 1781 | // (which we used to place this phi), that argument must dominate this phi, |
| 1782 | // and thus, must dominate the phi's uses, and so we will not hit the assert |
| 1783 | // below. |
| 1784 | NewDefTarget = onlySingleValue(MP); |
| 1785 | assert((NewDefTarget || MP->use_empty()) && |
| 1786 | "We can't delete this memory phi"); |
| 1787 | } else { |
| 1788 | NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess(); |
| 1789 | } |
| 1790 | |
| 1791 | // Re-point the uses at our defining access |
| 1792 | if (!MA->use_empty()) |
| 1793 | MA->replaceAllUsesWith(NewDefTarget); |
| 1794 | |
| 1795 | // The call below to erase will destroy MA, so we can't change the order we |
| 1796 | // are doing things here |
| 1797 | removeFromLookups(MA); |
| 1798 | } |
| 1799 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1800 | void MemorySSA::print(raw_ostream &OS) const { |
| 1801 | MemorySSAAnnotatedWriter Writer(this); |
| 1802 | F.print(OS, &Writer); |
| 1803 | } |
| 1804 | |
| 1805 | void MemorySSA::dump() const { |
| 1806 | MemorySSAAnnotatedWriter Writer(this); |
| 1807 | F.print(dbgs(), &Writer); |
| 1808 | } |
| 1809 | |
Daniel Berlin | 932b4cb | 2016-02-10 17:39:43 +0000 | [diff] [blame] | 1810 | void MemorySSA::verifyMemorySSA() const { |
| 1811 | verifyDefUses(F); |
| 1812 | verifyDomination(F); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1813 | verifyOrdering(F); |
Geoff Berry | cdf5333 | 2016-08-08 17:52:01 +0000 | [diff] [blame] | 1814 | Walker->verify(this); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1815 | } |
| 1816 | |
| 1817 | /// \brief Verify that the order and existence of MemoryAccesses matches the |
| 1818 | /// order and existence of memory affecting instructions. |
| 1819 | void MemorySSA::verifyOrdering(Function &F) const { |
| 1820 | // Walk all the blocks, comparing what the lookups think and what the access |
| 1821 | // lists think, as well as the order in the blocks vs the order in the access |
| 1822 | // lists. |
| 1823 | SmallVector<MemoryAccess *, 32> ActualAccesses; |
| 1824 | for (BasicBlock &B : F) { |
| 1825 | const AccessList *AL = getBlockAccesses(&B); |
| 1826 | MemoryAccess *Phi = getMemoryAccess(&B); |
| 1827 | if (Phi) |
| 1828 | ActualAccesses.push_back(Phi); |
| 1829 | for (Instruction &I : B) { |
| 1830 | MemoryAccess *MA = getMemoryAccess(&I); |
| 1831 | assert((!MA || AL) && "We have memory affecting instructions " |
| 1832 | "in this block but they are not in the " |
| 1833 | "access list"); |
| 1834 | if (MA) |
| 1835 | ActualAccesses.push_back(MA); |
| 1836 | } |
| 1837 | // Either we hit the assert, really have no accesses, or we have both |
| 1838 | // accesses and an access list |
| 1839 | if (!AL) |
| 1840 | continue; |
| 1841 | assert(AL->size() == ActualAccesses.size() && |
| 1842 | "We don't have the same number of accesses in the block as on the " |
| 1843 | "access list"); |
| 1844 | auto ALI = AL->begin(); |
| 1845 | auto AAI = ActualAccesses.begin(); |
| 1846 | while (ALI != AL->end() && AAI != ActualAccesses.end()) { |
| 1847 | assert(&*ALI == *AAI && "Not the same accesses in the same order"); |
| 1848 | ++ALI; |
| 1849 | ++AAI; |
| 1850 | } |
| 1851 | ActualAccesses.clear(); |
| 1852 | } |
Daniel Berlin | 932b4cb | 2016-02-10 17:39:43 +0000 | [diff] [blame] | 1853 | } |
| 1854 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1855 | /// \brief Verify the domination properties of MemorySSA by checking that each |
| 1856 | /// definition dominates all of its uses. |
Daniel Berlin | 932b4cb | 2016-02-10 17:39:43 +0000 | [diff] [blame] | 1857 | void MemorySSA::verifyDomination(Function &F) const { |
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1858 | #ifndef NDEBUG |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1859 | for (BasicBlock &B : F) { |
| 1860 | // Phi nodes are attached to basic blocks |
Daniel Berlin | 2919b1c | 2016-08-05 21:46:52 +0000 | [diff] [blame] | 1861 | if (MemoryPhi *MP = getMemoryAccess(&B)) |
| 1862 | for (const Use &U : MP->uses()) |
| 1863 | assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1864 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1865 | for (Instruction &I : B) { |
| 1866 | MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I)); |
| 1867 | if (!MD) |
| 1868 | continue; |
| 1869 | |
Daniel Berlin | 2919b1c | 2016-08-05 21:46:52 +0000 | [diff] [blame] | 1870 | for (const Use &U : MD->uses()) |
| 1871 | assert(dominates(MD, U) && "Memory Def does not dominate its uses");
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1872 | } |
| 1873 | } |
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1874 | #endif |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1875 | } |
| 1876 | |
| 1877 | /// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use |
| 1878 | /// appears in the use list of \p Def. |
Daniel Berlin | 932b4cb | 2016-02-10 17:39:43 +0000 | [diff] [blame] | 1880 | void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { |
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1881 | #ifndef NDEBUG |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1882 | // The live on entry use may cause us to get a NULL def here |
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1883 | if (!Def) |
| 1884 | assert(isLiveOnEntryDef(Use) && |
| 1885 | "Null def but use does not point to live on entry def");
| 1886 | else |
Daniel Berlin | da2f38e | 2016-08-11 21:26:50 +0000 | [diff] [blame] | 1887 | assert(is_contained(Def->users(), Use) && |
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1888 | "Did not find use in def's use list"); |
| 1889 | #endif |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1890 | } |
| 1891 | |
| 1892 | /// \brief Verify the immediate use information, by walking all the memory |
| 1893 | /// accesses and verifying that, for each use, it appears in the |
| 1894 | /// appropriate def's use list |
Daniel Berlin | 932b4cb | 2016-02-10 17:39:43 +0000 | [diff] [blame] | 1895 | void MemorySSA::verifyDefUses(Function &F) const { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1896 | for (BasicBlock &B : F) { |
| 1897 | // Phi nodes are attached to basic blocks |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1898 | if (MemoryPhi *Phi = getMemoryAccess(&B)) { |
David Majnemer | 580e754 | 2016-06-25 00:04:06 +0000 | [diff] [blame] | 1899 | assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance( |
| 1900 | pred_begin(&B), pred_end(&B))) && |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1901 | "Incomplete MemoryPhi Node"); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1902 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) |
| 1903 | verifyUseInDefs(Phi->getIncomingValue(I), Phi); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1904 | } |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1905 | |
| 1906 | for (Instruction &I : B) { |
| 1907 | if (MemoryAccess *MA = getMemoryAccess(&I)) { |
| 1908 | assert(isa<MemoryUseOrDef>(MA) && |
| 1909 | "Found a phi node not attached to a bb"); |
| 1910 | verifyUseInDefs(cast<MemoryUseOrDef>(MA)->getDefiningAccess(), MA); |
| 1911 | } |
| 1912 | } |
| 1913 | } |
| 1914 | } |
| 1915 | |
| 1916 | MemoryAccess *MemorySSA::getMemoryAccess(const Value *I) const { |
Daniel Berlin | f6c9ae9 | 2016-02-10 17:41:25 +0000 | [diff] [blame] | 1917 | return ValueToMemoryAccess.lookup(I); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1918 | } |
| 1919 | |
| 1920 | MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const { |
| 1921 | return cast_or_null<MemoryPhi>(getMemoryAccess((const Value *)BB)); |
| 1922 | } |
| 1923 | |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1924 | /// Perform a local numbering on blocks so that instruction ordering can be |
| 1925 | /// determined in constant time. |
| 1926 | /// TODO: We currently just number in order. If we numbered by N, we could |
| 1927 | /// allow at least N-1 sequences of insertBefore or insertAfter (and at least |
| 1928 | /// log2(N) sequences of mixed before and after) without needing to invalidate |
| 1929 | /// the numbering. |
| 1930 | void MemorySSA::renumberBlock(const BasicBlock *B) const { |
| 1931 | // The pre-increment ensures the numbers really start at 1. |
| 1932 | unsigned long CurrentNumber = 0; |
| 1933 | const AccessList *AL = getBlockAccesses(B); |
| 1934 | assert(AL != nullptr && "Asking to renumber an empty block"); |
| 1935 | for (const auto &I : *AL) |
| 1936 | BlockNumbering[&I] = ++CurrentNumber; |
| 1937 | BlockNumberingValid.insert(B); |
| 1938 | } |
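| | // With this numbering in place, locallyDominates below reduces to an integer
| | // comparison of the two accesses' positions within their shared block, after
| | // the special cases for self-domination and liveOnEntry are handled.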
| 1939 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1940 | /// \brief Determine, for two memory accesses in the same block, |
| 1941 | /// whether \p Dominator dominates \p Dominatee. |
| 1942 | /// \returns True if \p Dominator dominates \p Dominatee. |
| 1943 | bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, |
| 1944 | const MemoryAccess *Dominatee) const { |
Sebastian Pop | e1f60b1 | 2016-06-10 21:36:41 +0000 | [diff] [blame] | 1945 | |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1946 | const BasicBlock *DominatorBlock = Dominator->getBlock(); |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1947 | |
Daniel Berlin | 1986030 | 2016-07-19 23:08:08 +0000 | [diff] [blame] | 1948 | assert((DominatorBlock == Dominatee->getBlock()) && |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1949 | "Asking for local domination when accesses are in different blocks!"); |
Sebastian Pop | e1f60b1 | 2016-06-10 21:36:41 +0000 | [diff] [blame] | 1950 | // A node dominates itself. |
| 1951 | if (Dominatee == Dominator) |
| 1952 | return true; |
| 1953 | |
| 1954 | // When Dominatee is defined on function entry, it is not dominated by another |
| 1955 | // memory access. |
| 1956 | if (isLiveOnEntryDef(Dominatee)) |
| 1957 | return false; |
| 1958 | |
| 1959 | // When Dominator is defined on function entry, it dominates the other memory |
| 1960 | // access. |
| 1961 | if (isLiveOnEntryDef(Dominator)) |
| 1962 | return true; |
| 1963 | |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1964 | if (!BlockNumberingValid.count(DominatorBlock)) |
| 1965 | renumberBlock(DominatorBlock); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1966 | |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1967 | unsigned long DominatorNum = BlockNumbering.lookup(Dominator); |
| 1968 | // All numbers start with 1 |
| 1969 | assert(DominatorNum != 0 && "Block was not numbered properly"); |
| 1970 | unsigned long DominateeNum = BlockNumbering.lookup(Dominatee); |
| 1971 | assert(DominateeNum != 0 && "Block was not numbered properly"); |
| 1972 | return DominatorNum < DominateeNum; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1973 | } |
| 1974 | |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 1975 | bool MemorySSA::dominates(const MemoryAccess *Dominator, |
| 1976 | const MemoryAccess *Dominatee) const { |
| 1977 | if (Dominator == Dominatee) |
| 1978 | return true; |
| 1979 | |
| 1980 | if (isLiveOnEntryDef(Dominatee)) |
| 1981 | return false; |
| 1982 | |
| 1983 | if (Dominator->getBlock() != Dominatee->getBlock()) |
| 1984 | return DT->dominates(Dominator->getBlock(), Dominatee->getBlock()); |
| 1985 | return locallyDominates(Dominator, Dominatee); |
| 1986 | } |
| 1987 | |
Daniel Berlin | 2919b1c | 2016-08-05 21:46:52 +0000 | [diff] [blame] | 1988 | bool MemorySSA::dominates(const MemoryAccess *Dominator, |
| 1989 | const Use &Dominatee) const { |
| 1990 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) { |
| 1991 | BasicBlock *UseBB = MP->getIncomingBlock(Dominatee); |
| 1992 | // The def must dominate the incoming block of the phi. |
| 1993 | if (UseBB != Dominator->getBlock()) |
| 1994 | return DT->dominates(Dominator->getBlock(), UseBB); |
| 1995 | // If the UseBB and the DefBB are the same, compare locally. |
| 1996 | return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee)); |
| 1997 | } |
| 1998 | // If it's not a PHI node use, the normal dominates can already handle it. |
| 1999 | return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser())); |
| 2000 | } |
| 2001 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2002 | const static char LiveOnEntryStr[] = "liveOnEntry"; |
| 2003 | |
| 2004 | void MemoryDef::print(raw_ostream &OS) const { |
| 2005 | MemoryAccess *UO = getDefiningAccess(); |
| 2006 | |
| 2007 | OS << getID() << " = MemoryDef("; |
| 2008 | if (UO && UO->getID()) |
| 2009 | OS << UO->getID(); |
| 2010 | else |
| 2011 | OS << LiveOnEntryStr; |
| 2012 | OS << ')'; |
| 2013 | } |
| 2014 | |
| 2015 | void MemoryPhi::print(raw_ostream &OS) const { |
| 2016 | bool First = true; |
| 2017 | OS << getID() << " = MemoryPhi("; |
| 2018 | for (const auto &Op : operands()) { |
| 2019 | BasicBlock *BB = getIncomingBlock(Op); |
| 2020 | MemoryAccess *MA = cast<MemoryAccess>(Op); |
| 2021 | if (!First) |
| 2022 | OS << ','; |
| 2023 | else |
| 2024 | First = false; |
| 2025 | |
| 2026 | OS << '{'; |
| 2027 | if (BB->hasName()) |
| 2028 | OS << BB->getName(); |
| 2029 | else |
| 2030 | BB->printAsOperand(OS, false); |
| 2031 | OS << ','; |
| 2032 | if (unsigned ID = MA->getID()) |
| 2033 | OS << ID; |
| 2034 | else |
| 2035 | OS << LiveOnEntryStr; |
| 2036 | OS << '}'; |
| 2037 | } |
| 2038 | OS << ')'; |
| 2039 | } |
| 2040 | |
| 2041 | MemoryAccess::~MemoryAccess() {} |
| 2042 | |
| 2043 | void MemoryUse::print(raw_ostream &OS) const { |
| 2044 | MemoryAccess *UO = getDefiningAccess(); |
| 2045 | OS << "MemoryUse("; |
| 2046 | if (UO && UO->getID()) |
| 2047 | OS << UO->getID(); |
| 2048 | else |
| 2049 | OS << LiveOnEntryStr; |
| 2050 | OS << ')'; |
| 2051 | } |
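| |
| | // Taken together, the print routines above yield annotations of roughly this
| | // shape when a function is printed through the MemorySSAAnnotatedWriter (the
| | // surrounding IR here is illustrative only):
| | //   ; 1 = MemoryDef(liveOnEntry)
| | //   store i32 0, i32* %ptr
| | //   ; MemoryUse(1)
| | //   %v = load i32, i32* %ptr
| | //   ; 2 = MemoryPhi({if.then,1},{if.else,liveOnEntry})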
| 2052 | |
| 2053 | void MemoryAccess::dump() const { |
| 2054 | print(dbgs()); |
| 2055 | dbgs() << "\n"; |
| 2056 | } |
| 2057 | |
Chad Rosier | 232e29e | 2016-07-06 21:20:47 +0000 | [diff] [blame] | 2058 | char MemorySSAPrinterLegacyPass::ID = 0; |
| 2059 | |
| 2060 | MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) { |
| 2061 | initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry()); |
| 2062 | } |
| 2063 | |
| 2064 | void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const { |
| 2065 | AU.setPreservesAll(); |
| 2066 | AU.addRequired<MemorySSAWrapperPass>(); |
| 2067 | AU.addPreserved<MemorySSAWrapperPass>(); |
| 2068 | } |
| 2069 | |
| 2070 | bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) { |
| 2071 | auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); |
| 2072 | MSSA.print(dbgs()); |
| 2073 | if (VerifyMemorySSA) |
| 2074 | MSSA.verifyMemorySSA(); |
| 2075 | return false; |
| 2076 | } |
| 2077 | |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 2078 | char MemorySSAAnalysis::PassID; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2079 | |
Geoff Berry | 290a13e | 2016-08-08 18:27:22 +0000 | [diff] [blame] | 2080 | MemorySSAAnalysis::Result |
Sean Silva | 36e0d01 | 2016-08-09 00:28:15 +0000 | [diff] [blame] | 2081 | MemorySSAAnalysis::run(Function &F, FunctionAnalysisManager &AM) { |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 2082 | auto &DT = AM.getResult<DominatorTreeAnalysis>(F); |
| 2083 | auto &AA = AM.getResult<AAManager>(F); |
Geoff Berry | 290a13e | 2016-08-08 18:27:22 +0000 | [diff] [blame] | 2084 | return MemorySSAAnalysis::Result(make_unique<MemorySSA>(F, &AA, &DT)); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2085 | } |
| 2086 | |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 2087 | PreservedAnalyses MemorySSAPrinterPass::run(Function &F, |
| 2088 | FunctionAnalysisManager &AM) { |
| 2089 | OS << "MemorySSA for function: " << F.getName() << "\n"; |
Geoff Berry | 290a13e | 2016-08-08 18:27:22 +0000 | [diff] [blame] | 2090 | AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS); |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 2091 | |
| 2092 | return PreservedAnalyses::all(); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2093 | } |
| 2094 | |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 2095 | PreservedAnalyses MemorySSAVerifierPass::run(Function &F, |
| 2096 | FunctionAnalysisManager &AM) { |
Geoff Berry | 290a13e | 2016-08-08 18:27:22 +0000 | [diff] [blame] | 2097 | AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA(); |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 2098 | |
| 2099 | return PreservedAnalyses::all(); |
| 2100 | } |
| 2101 | |
| 2102 | char MemorySSAWrapperPass::ID = 0; |
| 2103 | |
| 2104 | MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) { |
| 2105 | initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry()); |
| 2106 | } |
| 2107 | |
| 2108 | void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); } |
| 2109 | |
| 2110 | void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2111 | AU.setPreservesAll(); |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 2112 | AU.addRequiredTransitive<DominatorTreeWrapperPass>(); |
| 2113 | AU.addRequiredTransitive<AAResultsWrapperPass>(); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2114 | } |
| 2115 | |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 2116 | bool MemorySSAWrapperPass::runOnFunction(Function &F) { |
| 2117 | auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); |
| 2118 | auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); |
| 2119 | MSSA.reset(new MemorySSA(F, &AA, &DT)); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2120 | return false; |
| 2121 | } |
| 2122 | |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 2123 | void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); } |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2124 | |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 2125 | void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2126 | MSSA->print(OS); |
| 2127 | } |
| 2128 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2129 | MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {} |
| 2130 | |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 2131 | MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A, |
| 2132 | DominatorTree *D) |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 2133 | : MemorySSAWalker(M), Walker(*M, *A, *D, Cache), AutoResetWalker(true) {} |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2134 | |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 2135 | MemorySSA::CachingWalker::~CachingWalker() {} |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2136 | |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 2137 | void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) { |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 2138 | // TODO: We can do much better cache invalidation with differently stored |
| 2139 | // caches. For now, for MemoryUses, we simply remove them |
| 2140 | // from the cache, and kill the entire call/non-call cache for everything |
| 2141 | // else. The problem is that for phis or defs, we would currently need to |
| 2142 | // follow use chains down and invalidate anything below us in the chain |
| 2143 | // that currently terminates at this access. |
| 2144 | |
| 2145 | // See if this is a MemoryUse; if so, just remove the cached info. A MemoryUse |
| 2146 | // is by definition never a barrier, so nothing in the cache could point to |
| 2147 | // this use. In that case, we only need to invalidate the info for the use |
| 2148 | // itself. |
| 2149 | |
| 2150 | if (MemoryUse *MU = dyn_cast<MemoryUse>(MA)) { |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 2151 | UpwardsMemoryQuery Q(MU->getMemoryInst(), MU); |
| 2152 | Cache.remove(MU, Q.StartingLoc, Q.IsCall); |
Geoff Berry | 9fe26e6 | 2016-04-22 14:44:10 +0000 | [diff] [blame] | 2153 | } else { |
| 2154 | // If it is not a use, the best we can do right now is destroy the cache. |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 2155 | Cache.clear(); |
Geoff Berry | 9fe26e6 | 2016-04-22 14:44:10 +0000 | [diff] [blame] | 2156 | } |
| 2157 | |
Filipe Cabecinhas | 0da9937 | 2016-04-29 15:22:48 +0000 | [diff] [blame] | 2158 | #ifdef EXPENSIVE_CHECKS |
Geoff Berry | 9fe26e6 | 2016-04-22 14:44:10 +0000 | [diff] [blame] | 2159 | verifyRemoved(MA); |
| 2160 | #endif |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 2161 | } |
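// Illustrative only: a transform that deletes a memory instruction is assumed
// to remove the corresponding access first, which is what ends up invalidating
// the walker's cached clobber results (API names assumed):
//
//   MemoryAccess *MA = MSSA->getMemoryAccess(StoreToDelete);
//   MSSA->removeMemoryAccess(MA);   // expected to reach invalidateInfo above
//   StoreToDelete->eraseFromParent();
//
// Skipping the removal would leave cache entries pointing at a freed access.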
| 2162 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2163 | /// \brief Walk the use-def chains starting at \p MA and find |
| 2164 | /// the MemoryAccess that actually clobbers Loc. |
| 2165 | /// |
| 2166 | /// \returns our clobbering memory access |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 2167 | MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess( |
| 2168 | MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) { |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 2169 | MemoryAccess *New = Walker.findClobber(StartingAccess, Q); |
| 2170 | #ifdef EXPENSIVE_CHECKS |
| 2171 | MemoryAccess *NewNoCache = |
| 2172 | Walker.findClobber(StartingAccess, Q, /*UseWalkerCache=*/false); |
| 2173 | assert(NewNoCache == New && "Cache made us hand back a different result?"); |
| 2174 | #endif |
| 2175 | if (AutoResetWalker) |
| 2176 | resetClobberWalker(); |
| 2177 | return New; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2178 | } |
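// Illustrative only: for IR along these lines (MemorySSA annotations shown as
// comments),
//
//   store i32 0, i32* %p        ; 1 = MemoryDef(liveOnEntry)
//   store i32 1, i32* %q        ; 2 = MemoryDef(1)
//   %v = load i32, i32* %p      ; MemoryUse(2)
//
// the walk above, assuming %p and %q do not alias, skips past the store to %q
// and reports the store to %p (access 1) as the load's clobber, even though
// the load's defining access in the SSA form is access 2.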
| 2179 | |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 2180 | MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess( |
| 2181 | MemoryAccess *StartingAccess, MemoryLocation &Loc) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2182 | if (isa<MemoryPhi>(StartingAccess)) |
| 2183 | return StartingAccess; |
| 2184 | |
| 2185 | auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess); |
| 2186 | if (MSSA->isLiveOnEntryDef(StartingUseOrDef)) |
| 2187 | return StartingUseOrDef; |
| 2188 | |
| 2189 | Instruction *I = StartingUseOrDef->getMemoryInst(); |
| 2190 | |
| 2191 | // Conservatively, fences are always clobbers, so don't perform the walk if we |
| 2192 | // hit a fence. |
David Majnemer | a940f36 | 2016-07-15 17:19:24 +0000 | [diff] [blame] | 2193 | if (!ImmutableCallSite(I) && I->isFenceLike()) |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2194 | return StartingUseOrDef; |
| 2195 | |
| 2196 | UpwardsMemoryQuery Q; |
| 2197 | Q.OriginalAccess = StartingUseOrDef; |
| 2198 | Q.StartingLoc = Loc; |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 2199 | Q.Inst = I; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2200 | Q.IsCall = false; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2201 | |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 2202 | if (auto *CacheResult = Cache.lookup(StartingUseOrDef, Loc, Q.IsCall)) |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2203 | return CacheResult; |
| 2204 | |
| 2205 | // Unlike the other function, do not walk to the def of a def, because we are |
| 2206 | // handed something we already believe is the clobbering access. |
| 2207 | MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef) |
| 2208 | ? StartingUseOrDef->getDefiningAccess() |
| 2209 | : StartingUseOrDef; |
| 2210 | |
| 2211 | MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2212 | DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is "); |
| 2213 | DEBUG(dbgs() << *StartingUseOrDef << "\n"); |
| 2214 | DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is "); |
| 2215 | DEBUG(dbgs() << *Clobber << "\n"); |
| 2216 | return Clobber; |
| 2217 | } |
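// Illustrative only: the location-based overload above is for callers that
// want the clobber of an explicit MemoryLocation rather than of the
// instruction's own location (client-side names are assumptions):
//
//   MemoryLocation Loc = MemoryLocation::get(SomeLoad);
//   MemoryAccess *Start = MSSA.getMemoryAccess(SomeLoad);
//   MemoryAccess *Clobber =
//       MSSA.getWalker()->getClobberingMemoryAccess(Start, Loc);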
| 2218 | |
| 2219 | MemoryAccess * |
George Burgess IV | 400ae40 | 2016-07-20 19:51:34 +0000 | [diff] [blame] | 2220 | MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) { |
| 2221 | auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA); |
| 2222 | // If this is a MemoryPhi, we can't do anything. |
| 2223 | if (!StartingAccess) |
| 2224 | return MA; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2225 | |
George Burgess IV | 400ae40 | 2016-07-20 19:51:34 +0000 | [diff] [blame] | 2226 | const Instruction *I = StartingAccess->getMemoryInst(); |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 2227 | UpwardsMemoryQuery Q(I, StartingAccess); |
David Majnemer | a940f36 | 2016-07-15 17:19:24 +0000 | [diff] [blame] | 2228 | // We can't sanely do anything with fences; they conservatively |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2229 | // clobber all memory and have no locations to get pointers from to |
David Majnemer | a940f36 | 2016-07-15 17:19:24 +0000 | [diff] [blame] | 2230 | // try to disambiguate. |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 2231 | if (!Q.IsCall && I->isFenceLike()) |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2232 | return StartingAccess; |
| 2233 | |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 2234 | if (auto *CacheResult = Cache.lookup(StartingAccess, Q.StartingLoc, Q.IsCall)) |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2235 | return CacheResult; |
| 2236 | |
George Burgess IV | 024f3d2 | 2016-08-03 19:57:02 +0000 | [diff] [blame] | 2237 | if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) { |
| 2238 | MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef(); |
| 2239 | Cache.insert(StartingAccess, LiveOnEntry, Q.StartingLoc, Q.IsCall); |
| 2240 | return LiveOnEntry; |
| 2241 | } |
| 2242 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2243 | // Start with the thing we already think clobbers this location. |
| 2244 | MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess(); |
| 2245 | |
| 2246 | // At this point, DefiningAccess may be the live on entry def. |
| 2247 | // If it is, we will not get a better result. |
| 2248 | if (MSSA->isLiveOnEntryDef(DefiningAccess)) |
| 2249 | return DefiningAccess; |
| 2250 | |
| 2251 | MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2252 | DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is "); |
| 2253 | DEBUG(dbgs() << *DefiningAccess << "\n"); |
| 2254 | DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is "); |
| 2255 | DEBUG(dbgs() << *Result << "\n"); |
| 2256 | |
| 2257 | return Result; |
| 2258 | } |
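// Illustrative only: the common client pattern is to ask for the clobber of an
// instruction's access and special-case liveOnEntry (names assumed):
//
//   if (MemoryAccess *MA = MSSA.getMemoryAccess(LI)) {
//     MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MA);
//     if (MSSA.isLiveOnEntryDef(Clobber)) {
//       // Nothing in this function writes the loaded memory before LI.
//     }
//   }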
| 2259 | |
Geoff Berry | 9fe26e6 | 2016-04-22 14:44:10 +0000 | [diff] [blame] | 2260 | // Verify that MA doesn't exist in the walker's cache. |
George Burgess IV | fd1f2f8 | 2016-06-24 21:02:12 +0000 | [diff] [blame] | 2261 | void MemorySSA::CachingWalker::verifyRemoved(MemoryAccess *MA) { |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 2262 | assert(!Cache.contains(MA) && "Found removed MemoryAccess in cache."); |
Geoff Berry | 9fe26e6 | 2016-04-22 14:44:10 +0000 | [diff] [blame] | 2263 | } |
| 2264 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2265 | MemoryAccess * |
George Burgess IV | 400ae40 | 2016-07-20 19:51:34 +0000 | [diff] [blame] | 2266 | DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 2267 | if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) |
| 2268 | return Use->getDefiningAccess(); |
| 2269 | return MA; |
| 2270 | } |
| 2271 | |
| 2272 | MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( |
| 2273 | MemoryAccess *StartingAccess, MemoryLocation &) { |
| 2274 | if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) |
| 2275 | return Use->getDefiningAccess(); |
| 2276 | return StartingAccess; |
| 2277 | } |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 2278 | } // namespace llvm |