//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)
static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

static cl::opt<bool>
    VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
                    cl::desc("Verify MemorySSA in legacy printer pass."));

namespace llvm {

/// \brief An assembly annotator class to print Memory SSA information in
/// comments.
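/// For example, in the printed IR each memory-touching instruction is
/// preceded by its access, e.g. "; 1 = MemoryDef(liveOnEntry)" above a store
/// or "; MemoryUse(1)" above a load, and a block that merges memory states
/// begins with a MemoryPhi line such as "; 2 = MemoryPhi({entry,1},{body,3})".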
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

} // end namespace llvm

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a DenseMap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall() = default;
  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (ImmutableCallSite(Inst)) {
      IsCall = true;
      CS = ImmutableCallSite(Inst);
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence instruction,
      // and it is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  ImmutableCallSite getCS() const {
    assert(IsCall);
    return CS;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (IsCall)
      return CS.getCalledValue() == Other.CS.getCalledValue();
    return Loc == Other.Loc;
  }

private:
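  // Only one of these members is active at a time, discriminated by IsCall.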
  union {
    ImmutableCallSite CS;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (MLOC.IsCall)
      return hash_combine(MLOC.IsCall,
                          DenseMapInfo<const Value *>::getHashValue(
                              MLOC.getCS().getCalledValue()));
    return hash_combine(
        MLOC.IsCall, DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
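/// For example (volatility aside), an unordered load may legally be hoisted
/// above a monotonic load of the same address, but never above an acquire
/// load; and a seq_cst load may never be hoisted above another load at all.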
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}

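/// Return true if the instruction underlying MD may clobber UseLoc (or, when
/// UseInst is a call, if the two instructions' memory effects may interfere).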
static bool instructionClobbersQuery(MemoryDef *MD,
                                     const MemoryLocation &UseLoc,
                                     const Instruction *UseInst,
                                     AliasAnalysis &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  ImmutableCallSite UseCS(UseInst);

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCS)
        return false;
      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), UseLoc);
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return false;
    default:
      break;
    }
  }

  if (UseCS) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
    return isModOrRefSet(I);
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return !areLoadsReorderable(UseLoad, DefLoad);

  return isModSet(AA.getModRefInfo(DefInst, UseLoc));
}

static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysis &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

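/// Return true if MD is a lifetime_end intrinsic whose argument is a
/// must-alias of Loc, i.e. the lifetime of the memory at Loc ends at MD.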
static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           AliasAnalysis &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
    default:
      return false;
    }
  }
  return false;
}

static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  //
  // FIXME: We should handle invariant groups, as well. It's a bit harder,
  // because we need to pay close attention to invariant group barriers.
  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(
                                  cast<LoadInst>(I)->getPointerOperand()));
}

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` clobbers `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc The MemoryLocation for Start.
/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
static void LLVM_ATTRIBUTE_UNUSED
checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<MemoryAccessPair> VisitedPhis;
  SmallVector<MemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    MemoryAccessPair MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (MemoryAccess *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber =
              FoundClobber || MSSA.isLiveOnEntryDef(MD) ||
              instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (auto *MD = dyn_cast<MemoryDef>(MA)) {
        (void)MD;
        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
    }
  }

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysis &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;

  // Phi optimization bookkeeping
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
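    // Walk up the immediate dominators; the first dominating block that has
    // any defs supplies the nearest legal target, namely its last def.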
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both.
    MemoryAccess *Result;
    bool IsKnownClobber;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc,
                     const MemoryAccess *StopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt)
        return {Current, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current))
        if (MSSA.isLiveOnEntryDef(MD) ||
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
          return {MD, true};
    }

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false};
  }

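  /// Fan the search out across Phi: each incoming value (paired with the
  /// location, phi-translated where needed) becomes a new DefPath whose
  /// Previous link is PriorNode, and is queued in PausedSearches.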
  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
                                 upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix it without a ton of
      // extra work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere);
        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking.
        NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {
        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  void reset() {}

  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
    Query = &Q;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }

  void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
};

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

/// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalker Walker;
  bool AutoResetWalker = true;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);

public:
  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
  void invalidateInfo(MemoryAccess *) override;

  /// Whether we call resetClobberWalker() after each time we *actually* walk to
  /// answer a clobber query.
  void setAutoResetWalker(bool AutoReset) { AutoResetWalker = AutoReset; }

  /// Drop the walker's persistent data structures.
  void resetClobberWalker() { Walker.reset(); }

  void verify(const MemorySSA *MSSA) override {
    MemorySSAWalker::verify(MSSA);
    Walker.verify(MSSA);
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      int PhiIndex = Phi->getBasicBlockIndex(BB);
      assert(PhiIndex != -1 && "Incomplete phi during partial rename");
      Phi->setIncomingValue(PhiIndex, IncomingVal);
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// \brief Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
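/// Within the block, any use or def whose defining access is still unset (or
/// all of them, when RenameAllUses is set) is wired to the current incoming
/// value, and every def or phi encountered becomes the new incoming value.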
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// \brief This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
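/// The walk is iterative (an explicit RenamePassData stack) rather than
/// recursive, so deep dominator trees cannot overflow the call stack.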
| 949 | void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal, |
Daniel Berlin | 78cbd28 | 2017-02-20 22:26:03 +0000 | [diff] [blame] | 950 | SmallPtrSetImpl<BasicBlock *> &Visited, |
| 951 | bool SkipVisited, bool RenameAllUses) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 952 | SmallVector<RenamePassData, 32> WorkStack; |
Daniel Berlin | 78cbd28 | 2017-02-20 22:26:03 +0000 | [diff] [blame] | 953 | // Skip everything if we already renamed this block and we are skipping. |
| 954 | // Note: You can't sink this into the if, because we need it to occur |
| 955 | // regardless of whether we skip blocks or not. |
| 956 | bool AlreadyVisited = !Visited.insert(Root->getBlock()).second; |
| 957 | if (SkipVisited && AlreadyVisited) |
| 958 | return; |
| 959 | |
| 960 | IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses); |
| 961 | renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 962 | WorkStack.push_back({Root, Root->begin(), IncomingVal}); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 963 | |
| 964 | while (!WorkStack.empty()) { |
| 965 | DomTreeNode *Node = WorkStack.back().DTN; |
| 966 | DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt; |
| 967 | IncomingVal = WorkStack.back().IncomingVal; |
| 968 | |
| 969 | if (ChildIt == Node->end()) { |
| 970 | WorkStack.pop_back(); |
| 971 | } else { |
| 972 | DomTreeNode *Child = *ChildIt; |
| 973 | ++WorkStack.back().ChildIt; |
| 974 | BasicBlock *BB = Child->getBlock(); |
Daniel Berlin | 78cbd28 | 2017-02-20 22:26:03 +0000 | [diff] [blame] | 975 | // Note: You can't sink this into the if, because we need it to occur |
| 976 | // regardless of whether we skip blocks or not. |
| 977 | AlreadyVisited = !Visited.insert(BB).second; |
| 978 | if (SkipVisited && AlreadyVisited) { |
| 979 | // We already visited this during our renaming, which can happen when |
| 980 | // being asked to rename multiple blocks. Figure out the incoming val, |
| 981 | // which is the last def. |
| 982 | // Incoming value can only change if there is a block def, and in that |
| 983 | // case, it's the last block def in the list. |
| 984 | if (auto *BlockDefs = getWritableBlockDefs(BB)) |
| 985 | IncomingVal = &*BlockDefs->rbegin(); |
| 986 | } else |
| 987 | IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses); |
| 988 | renameSuccessorPhis(BB, IncomingVal, RenameAllUses); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 989 | WorkStack.push_back({Child, Child->begin(), IncomingVal}); |
| 990 | } |
| 991 | } |
| 992 | } |
| 993 | |
George Burgess IV | a362b09 | 2016-07-06 00:28:43 +0000 | [diff] [blame] | 994 | /// \brief This handles unreachable block accesses by deleting phi nodes in |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 995 | /// unreachable blocks, and marking all other unreachable MemoryAccess's as |
| 996 | /// being uses of the live on entry definition. |
| 997 | void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) { |
| 998 | assert(!DT->isReachableFromEntry(BB) && |
| 999 | "Reachable block found while handling unreachable blocks"); |
| 1000 | |
Daniel Berlin | fc7e651 | 2016-07-06 05:32:05 +0000 | [diff] [blame] | 1001 | // Make sure phi nodes in our reachable successors end up with a |
| 1002 | // LiveOnEntryDef for our incoming edge, even though our block is forward |
| 1003 | // unreachable. We could just disconnect these blocks from the CFG fully, |
| 1004 | // but we do not right now. |
| 1005 | for (const BasicBlock *S : successors(BB)) { |
| 1006 | if (!DT->isReachableFromEntry(S)) |
| 1007 | continue; |
| 1008 | auto It = PerBlockAccesses.find(S); |
| 1009 | // Rename the phi nodes in our successor block |
| 1010 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) |
| 1011 | continue; |
| 1012 | AccessList *Accesses = It->second.get(); |
| 1013 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); |
| 1014 | Phi->addIncoming(LiveOnEntryDef.get(), BB); |
| 1015 | } |
| 1016 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1017 | auto It = PerBlockAccesses.find(BB); |
| 1018 | if (It == PerBlockAccesses.end()) |
| 1019 | return; |
| 1020 | |
| 1021 | auto &Accesses = It->second; |
| 1022 | for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) { |
| 1023 | auto Next = std::next(AI); |
| 1024 | // If we have a phi, just remove it. We are going to replace all |
| 1025 | // users with live on entry. |
| 1026 | if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI)) |
| 1027 | UseOrDef->setDefiningAccess(LiveOnEntryDef.get()); |
| 1028 | else |
| 1029 | Accesses->erase(AI); |
| 1030 | AI = Next; |
| 1031 | } |
| 1032 | } |
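
// Sketch of the effect (made-up names and IDs): for an unreachable block %bb
// with a reachable successor %succ containing a MemoryPhi,
//
//   succ:
//     3 = MemoryPhi(..., {bb, liveOnEntry})   ; incoming edge from %bb
//
// while inside %bb itself, uses and defs become MemoryUse(liveOnEntry) and
// MemoryDef(liveOnEntry), and any MemoryPhi in %bb is erased.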
| 1033 | |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 1034 | MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT) |
| 1035 | : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr), |
Daniel Berlin | cd2deac | 2016-10-20 20:13:45 +0000 | [diff] [blame] | 1036 | NextID(INVALID_MEMORYACCESS_ID) { |
Daniel Berlin | 16ed57c | 2016-06-27 18:22:27 +0000 | [diff] [blame] | 1037 | buildMemorySSA(); |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 1038 | } |
| 1039 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1040 | MemorySSA::~MemorySSA() { |
| 1041 | // Drop all our references |
| 1042 | for (const auto &Pair : PerBlockAccesses) |
| 1043 | for (MemoryAccess &MA : *Pair.second) |
| 1044 | MA.dropAllReferences(); |
| 1045 | } |
| 1046 | |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1047 | MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1048 | auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr)); |
| 1049 | |
| 1050 | if (Res.second) |
Eugene Zelenko | bb1b2d0 | 2017-08-16 22:07:40 +0000 | [diff] [blame] | 1051 | Res.first->second = llvm::make_unique<AccessList>(); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1052 | return Res.first->second.get(); |
| 1053 | } |
Eugene Zelenko | bb1b2d0 | 2017-08-16 22:07:40 +0000 | [diff] [blame] | 1054 | |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1055 | MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) { |
| 1056 | auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr)); |
| 1057 | |
| 1058 | if (Res.second) |
Eugene Zelenko | bb1b2d0 | 2017-08-16 22:07:40 +0000 | [diff] [blame] | 1059 | Res.first->second = llvm::make_unique<DefsList>(); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1060 | return Res.first->second.get(); |
| 1061 | } |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1062 | |
Eugene Zelenko | bb1b2d0 | 2017-08-16 22:07:40 +0000 | [diff] [blame] | 1063 | namespace llvm { |
| 1064 | |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1065 | /// This class is a batch walker of all MemoryUse's in the program, and points |
| 1066 | /// their defining access at the thing that actually clobbers them. Because it |
| 1067 | /// is a batch walker that touches everything, it does not operate like the |
| 1068 | /// other walkers. This walker is basically performing a top-down SSA renaming |
| 1069 | /// pass, where the version stack is used as the cache. This enables it to be |
| 1070 | /// significantly more time and memory efficient than using the regular walker, |
| 1071 | /// which walks bottom-up.
| 1072 | class MemorySSA::OptimizeUses { |
| 1073 | public: |
| 1074 | OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA, |
| 1075 | DominatorTree *DT) |
| 1076 |       : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {}
| 1079 | |
| 1080 | void optimizeUses(); |
| 1081 | |
| 1082 | private: |
| 1083 |   /// This represents where a given MemoryLocation is in the stack.
| 1084 | struct MemlocStackInfo { |
| 1085 |     // This essentially keeps track of versions of the stack. Whenever
| 1086 | // the stack changes due to pushes or pops, these versions increase. |
| 1087 | unsigned long StackEpoch; |
| 1088 | unsigned long PopEpoch; |
| 1089 | // This is the lower bound of places on the stack to check. It is equal to |
| 1090 | // the place the last stack walk ended. |
| 1091 |     // Note: Correctness depends on this being initialized to 0, which DenseMap
| 1092 |     // does.
| 1093 | unsigned long LowerBound; |
Daniel Berlin | 4b4c722 | 2016-08-08 04:44:53 +0000 | [diff] [blame] | 1094 | const BasicBlock *LowerBoundBlock; |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1095 | // This is where the last walk for this memory location ended. |
| 1096 | unsigned long LastKill; |
| 1097 | bool LastKillValid; |
| 1098 | }; |
Eugene Zelenko | bb1b2d0 | 2017-08-16 22:07:40 +0000 | [diff] [blame] | 1099 | |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1100 | void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &, |
| 1101 | SmallVectorImpl<MemoryAccess *> &, |
| 1102 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &); |
Eugene Zelenko | bb1b2d0 | 2017-08-16 22:07:40 +0000 | [diff] [blame] | 1103 | |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1104 | MemorySSA *MSSA; |
| 1105 | MemorySSAWalker *Walker; |
| 1106 | AliasAnalysis *AA; |
| 1107 | DominatorTree *DT; |
| 1108 | }; |
| 1109 | |
Eugene Zelenko | bb1b2d0 | 2017-08-16 22:07:40 +0000 | [diff] [blame] | 1110 | } // end namespace llvm |
| 1111 | |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1112 | /// Optimize the uses in a given block. This is basically the SSA renaming
| 1113 | /// algorithm, with one caveat: We are able to use a single stack for all |
| 1114 | /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is |
| 1115 | /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just |
| 1116 | /// going to be some position in that stack of possible ones. |
| 1117 | /// |
| 1118 | /// We track the stack positions that each MemoryLocation needs
| 1119 | /// to check, and where its last walk ended, because we only want to check
| 1120 | /// the things that have changed since last time. The same MemoryLocation
| 1121 | /// should get clobbered by the same store (getModRefInfo does not use
| 1122 | /// invariantness or the like; if it starts to, we can extend MemoryLocOrCall
| 1123 | /// to include the relevant data).
| 1124 | void MemorySSA::OptimizeUses::optimizeUsesInBlock( |
| 1125 | const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch, |
| 1126 | SmallVectorImpl<MemoryAccess *> &VersionStack, |
| 1127 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) { |
| 1128 |   // If no accesses, nothing to do.
| 1130 | MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB); |
| 1131 | if (Accesses == nullptr) |
| 1132 | return; |
| 1133 | |
| 1134 | // Pop everything that doesn't dominate the current block off the stack, |
| 1135 | // increment the PopEpoch to account for this. |
Piotr Padlewski | cc5868c1 | 2017-02-18 20:34:36 +0000 | [diff] [blame] | 1136 | while (true) { |
| 1137 | assert( |
| 1138 | !VersionStack.empty() && |
| 1139 | "Version stack should have liveOnEntry sentinel dominating everything"); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1140 | BasicBlock *BackBlock = VersionStack.back()->getBlock(); |
| 1141 | if (DT->dominates(BackBlock, BB)) |
| 1142 | break; |
| 1143 | while (VersionStack.back()->getBlock() == BackBlock) |
| 1144 | VersionStack.pop_back(); |
| 1145 | ++PopEpoch; |
| 1146 | } |
Piotr Padlewski | cc5868c1 | 2017-02-18 20:34:36 +0000 | [diff] [blame] | 1147 | |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1148 | for (MemoryAccess &MA : *Accesses) { |
| 1149 | auto *MU = dyn_cast<MemoryUse>(&MA); |
| 1150 | if (!MU) { |
| 1151 | VersionStack.push_back(&MA); |
| 1152 | ++StackEpoch; |
| 1153 | continue; |
| 1154 | } |
| 1155 | |
George Burgess IV | 024f3d2 | 2016-08-03 19:57:02 +0000 | [diff] [blame] | 1156 | if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) { |
Daniel Berlin | cd2deac | 2016-10-20 20:13:45 +0000 | [diff] [blame] | 1157 | MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true); |
George Burgess IV | 024f3d2 | 2016-08-03 19:57:02 +0000 | [diff] [blame] | 1158 | continue; |
| 1159 | } |
| 1160 | |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1161 | MemoryLocOrCall UseMLOC(MU); |
| 1162 | auto &LocInfo = LocStackInfo[UseMLOC]; |
Daniel Berlin | 26fcea9 | 2016-08-02 20:02:21 +0000 | [diff] [blame] | 1163 |     // If the pop epoch changed, it means we've removed stuff from the top of
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1164 |     // the stack due to changing blocks. We may have to reset the lower bound
| 1165 |     // or last kill info.
| 1166 | if (LocInfo.PopEpoch != PopEpoch) { |
| 1167 | LocInfo.PopEpoch = PopEpoch; |
| 1168 | LocInfo.StackEpoch = StackEpoch; |
Daniel Berlin | 4b4c722 | 2016-08-08 04:44:53 +0000 | [diff] [blame] | 1169 | // If the lower bound was in something that no longer dominates us, we |
| 1170 | // have to reset it. |
| 1171 | // We can't simply track stack size, because the stack may have had |
| 1172 | // pushes/pops in the meantime. |
| 1173 |       // XXX: This is non-optimal, but is only slower in cases with heavily
| 1174 |       // branching dominator trees. Getting the optimal number of queries would
| 1175 |       // require making LowerBound and LastKill per-location stacks, popping them
| 1176 |       // until the top of that stack dominates us. This does not seem worth it ATM.
| 1177 | // A much cheaper optimization would be to always explore the deepest |
| 1178 | // branch of the dominator tree first. This will guarantee this resets on |
| 1179 | // the smallest set of blocks. |
| 1180 | if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB && |
Daniel Berlin | 1e98c04 | 2016-09-26 17:22:54 +0000 | [diff] [blame] | 1181 | !DT->dominates(LocInfo.LowerBoundBlock, BB)) { |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1182 | // Reset the lower bound of things to check. |
| 1183 | // TODO: Some day we should be able to reset to last kill, rather than |
| 1184 | // 0. |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1185 | LocInfo.LowerBound = 0; |
Daniel Berlin | 4b4c722 | 2016-08-08 04:44:53 +0000 | [diff] [blame] | 1186 | LocInfo.LowerBoundBlock = VersionStack[0]->getBlock(); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1187 | LocInfo.LastKillValid = false; |
| 1188 | } |
| 1189 | } else if (LocInfo.StackEpoch != StackEpoch) { |
| 1190 | // If all that has changed is the StackEpoch, we only have to check the |
| 1191 | // new things on the stack, because we've checked everything before. In |
| 1192 | // this case, the lower bound of things to check remains the same. |
| 1193 | LocInfo.PopEpoch = PopEpoch; |
| 1194 | LocInfo.StackEpoch = StackEpoch; |
| 1195 | } |
| 1196 | if (!LocInfo.LastKillValid) { |
| 1197 | LocInfo.LastKill = VersionStack.size() - 1; |
| 1198 | LocInfo.LastKillValid = true; |
| 1199 | } |
| 1200 | |
| 1201 | // At this point, we should have corrected last kill and LowerBound to be |
| 1202 | // in bounds. |
| 1203 | assert(LocInfo.LowerBound < VersionStack.size() && |
| 1204 | "Lower bound out of range"); |
| 1205 | assert(LocInfo.LastKill < VersionStack.size() && |
| 1206 | "Last kill info out of range"); |
| 1207 | // In any case, the new upper bound is the top of the stack. |
| 1208 | unsigned long UpperBound = VersionStack.size() - 1; |
| 1209 | |
| 1210 | if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) { |
Daniel Berlin | 26fcea9 | 2016-08-02 20:02:21 +0000 | [diff] [blame] | 1211 | DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " (" |
| 1212 | << *(MU->getMemoryInst()) << ")" |
| 1213 | << " because there are " << UpperBound - LocInfo.LowerBound |
| 1214 | << " stores to disambiguate\n"); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1215 | // Because we did not walk, LastKill is no longer valid, as this may |
| 1216 | // have been a kill. |
| 1217 | LocInfo.LastKillValid = false; |
| 1218 | continue; |
| 1219 | } |
| 1220 | bool FoundClobberResult = false; |
| 1221 | while (UpperBound > LocInfo.LowerBound) { |
| 1222 | if (isa<MemoryPhi>(VersionStack[UpperBound])) { |
| 1223 | // For phis, use the walker, see where we ended up, go there |
| 1224 | Instruction *UseInst = MU->getMemoryInst(); |
| 1225 | MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst); |
| 1226 | // We are guaranteed to find it or something is wrong |
| 1227 | while (VersionStack[UpperBound] != Result) { |
| 1228 | assert(UpperBound != 0); |
| 1229 | --UpperBound; |
| 1230 | } |
| 1231 | FoundClobberResult = true; |
| 1232 | break; |
| 1233 | } |
| 1234 | |
| 1235 | MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]); |
Daniel Berlin | df10119 | 2016-08-03 00:01:46 +0000 | [diff] [blame] | 1236 | // If the lifetime of the pointer ends at this instruction, it's live on |
| 1237 | // entry. |
| 1238 | if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) { |
| 1239 | // Reset UpperBound to liveOnEntryDef's place in the stack |
| 1240 | UpperBound = 0; |
| 1241 | FoundClobberResult = true; |
| 1242 | break; |
| 1243 | } |
Daniel Berlin | dff31de | 2016-08-02 21:57:52 +0000 | [diff] [blame] | 1244 | if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) { |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1245 | FoundClobberResult = true; |
| 1246 | break; |
| 1247 | } |
| 1248 | --UpperBound; |
| 1249 | } |
| 1250 |     // At the end of this loop, UpperBound is either a clobber, or the lower
| 1251 |     // bound. PHI walking may cause it to be < LowerBound, and in fact < LastKill.
| 1252 | if (FoundClobberResult || UpperBound < LocInfo.LastKill) { |
Daniel Berlin | cd2deac | 2016-10-20 20:13:45 +0000 | [diff] [blame] | 1253 | MU->setDefiningAccess(VersionStack[UpperBound], true); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1254 |       // The last kill is now wherever the walk ended up.
| 1255 | LocInfo.LastKill = UpperBound; |
| 1256 | } else { |
| 1257 | // Otherwise, we checked all the new ones, and now we know we can get to |
| 1258 | // LastKill. |
Daniel Berlin | cd2deac | 2016-10-20 20:13:45 +0000 | [diff] [blame] | 1259 | MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1260 | } |
| 1261 | LocInfo.LowerBound = VersionStack.size() - 1; |
Daniel Berlin | 4b4c722 | 2016-08-08 04:44:53 +0000 | [diff] [blame] | 1262 | LocInfo.LowerBoundBlock = BB; |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1263 | } |
| 1264 | } |
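
// A worked sketch of the caching (all numbers illustrative): suppose the
// version stack is [liveOnEntry, 1 = MemoryDef, 4 = MemoryDef], and a use of
// location Loc previously walked down to index 1 (LastKill == 1) and finished
// with LowerBound == 2. If only StackEpoch has changed since, we re-check
// just the entries pushed above LowerBound; if none of them clobbers Loc, the
// use is pointed back at VersionStack[LastKill] without re-walking the rest
// of the stack.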
| 1265 | |
| 1266 | /// Optimize uses to point to their actual clobbering definitions. |
| 1267 | void MemorySSA::OptimizeUses::optimizeUses() { |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1268 | SmallVector<MemoryAccess *, 16> VersionStack; |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1269 | DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo; |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1270 | VersionStack.push_back(MSSA->getLiveOnEntryDef()); |
| 1271 | |
| 1272 | unsigned long StackEpoch = 1; |
| 1273 | unsigned long PopEpoch = 1; |
Piotr Padlewski | cc5868c1 | 2017-02-18 20:34:36 +0000 | [diff] [blame] | 1274 | // We perform a non-recursive top-down dominator tree walk. |
Daniel Berlin | 7ac3d74 | 2016-08-05 22:09:14 +0000 | [diff] [blame] | 1275 | for (const auto *DomNode : depth_first(DT->getRootNode())) |
| 1276 | optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack, |
| 1277 | LocStackInfo); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1278 | } |
| 1279 | |
Daniel Berlin | 3d512a2 | 2016-08-22 19:14:30 +0000 | [diff] [blame] | 1280 | void MemorySSA::placePHINodes( |
Mandeep Singh Grang | 73f0095 | 2016-11-21 19:33:02 +0000 | [diff] [blame] | 1281 | const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks, |
| 1282 | const DenseMap<const BasicBlock *, unsigned int> &BBNumbers) { |
Daniel Berlin | 3d512a2 | 2016-08-22 19:14:30 +0000 | [diff] [blame] | 1283 | // Determine where our MemoryPhi's should go |
| 1284 | ForwardIDFCalculator IDFs(*DT); |
| 1285 | IDFs.setDefiningBlocks(DefiningBlocks); |
Daniel Berlin | 3d512a2 | 2016-08-22 19:14:30 +0000 | [diff] [blame] | 1286 | SmallVector<BasicBlock *, 32> IDFBlocks; |
| 1287 | IDFs.calculate(IDFBlocks); |
| 1288 | |
Mandeep Singh Grang | 73f0095 | 2016-11-21 19:33:02 +0000 | [diff] [blame] | 1289 | std::sort(IDFBlocks.begin(), IDFBlocks.end(), |
| 1290 | [&BBNumbers](const BasicBlock *A, const BasicBlock *B) { |
| 1291 | return BBNumbers.lookup(A) < BBNumbers.lookup(B); |
| 1292 | }); |
| 1293 | |
Daniel Berlin | 3d512a2 | 2016-08-22 19:14:30 +0000 | [diff] [blame] | 1294 | // Now place MemoryPhi nodes. |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1295 | for (auto &BB : IDFBlocks) |
| 1296 | createMemoryPhi(BB); |
Daniel Berlin | 3d512a2 | 2016-08-22 19:14:30 +0000 | [diff] [blame] | 1297 | } |
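
// Illustrative example (made-up block names): for a diamond CFG where both
// arms contain a store, the iterated dominance frontier of
// {if.then, if.else} is the join block, so that is where the MemoryPhi goes:
//
//   if.then:                       if.else:
//     1 = MemoryDef(liveOnEntry)     2 = MemoryDef(liveOnEntry)
//   join:
//     3 = MemoryPhi({if.then,1},{if.else,2})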
| 1298 | |
Daniel Berlin | 16ed57c | 2016-06-27 18:22:27 +0000 | [diff] [blame] | 1299 | void MemorySSA::buildMemorySSA() { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1300 | // We create an access to represent "live on entry", for things like |
| 1301 | // arguments or users of globals, where the memory they use is defined before |
| 1302 | // the beginning of the function. We do not actually insert it into the IR. |
| 1303 | // We do not define a live on exit for the immediate uses, and thus our |
| 1304 | // semantics do *not* imply that something with no immediate uses can simply |
| 1305 | // be removed. |
| 1306 | BasicBlock &StartingPoint = F.getEntryBlock(); |
Eugene Zelenko | bb1b2d0 | 2017-08-16 22:07:40 +0000 | [diff] [blame] | 1307 | LiveOnEntryDef = |
| 1308 | llvm::make_unique<MemoryDef>(F.getContext(), nullptr, nullptr, |
| 1309 | &StartingPoint, NextID++); |
Mandeep Singh Grang | 73f0095 | 2016-11-21 19:33:02 +0000 | [diff] [blame] | 1310 | DenseMap<const BasicBlock *, unsigned int> BBNumbers; |
| 1311 | unsigned NextBBNum = 0; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1312 | |
| 1313 | // We maintain lists of memory accesses per-block, trading memory for time. We |
| 1314 | // could just look up the memory access for every possible instruction in the |
| 1315 | // stream. |
| 1316 | SmallPtrSet<BasicBlock *, 32> DefiningBlocks; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1317 | // Go through each block, figure out where defs occur, and chain together all |
| 1318 | // the accesses. |
| 1319 | for (BasicBlock &B : F) { |
Mandeep Singh Grang | 73f0095 | 2016-11-21 19:33:02 +0000 | [diff] [blame] | 1320 | BBNumbers[&B] = NextBBNum++; |
Daniel Berlin | 7898ca6 | 2016-02-07 01:52:15 +0000 | [diff] [blame] | 1321 | bool InsertIntoDef = false; |
Daniel Berlin | ada263d | 2016-06-20 20:21:33 +0000 | [diff] [blame] | 1322 | AccessList *Accesses = nullptr; |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1323 | DefsList *Defs = nullptr; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1324 | for (Instruction &I : B) { |
Peter Collingbourne | ffecb14 | 2016-05-26 01:19:17 +0000 | [diff] [blame] | 1325 | MemoryUseOrDef *MUD = createNewAccess(&I); |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1326 | if (!MUD) |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1327 | continue; |
Daniel Berlin | 1b51a29 | 2016-02-07 01:52:19 +0000 | [diff] [blame] | 1328 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1329 | if (!Accesses) |
| 1330 | Accesses = getOrCreateAccessList(&B); |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1331 | Accesses->push_back(MUD); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1332 | if (isa<MemoryDef>(MUD)) { |
| 1333 | InsertIntoDef = true; |
| 1334 | if (!Defs) |
| 1335 | Defs = getOrCreateDefsList(&B); |
| 1336 | Defs->push_back(*MUD); |
| 1337 | } |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1338 | } |
Daniel Berlin | 7898ca6 | 2016-02-07 01:52:15 +0000 | [diff] [blame] | 1339 | if (InsertIntoDef) |
| 1340 | DefiningBlocks.insert(&B); |
Daniel Berlin | 1b51a29 | 2016-02-07 01:52:19 +0000 | [diff] [blame] | 1341 | } |
Mandeep Singh Grang | 73f0095 | 2016-11-21 19:33:02 +0000 | [diff] [blame] | 1342 | placePHINodes(DefiningBlocks, BBNumbers); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1343 | |
| 1344 | // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get |
| 1345 | // filled in with all blocks. |
| 1346 | SmallPtrSet<BasicBlock *, 16> Visited; |
| 1347 | renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited); |
| 1348 | |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 1349 | CachingWalker *Walker = getWalkerImpl(); |
| 1350 | |
| 1351 | // We're doing a batch of updates; don't drop useful caches between them. |
| 1352 | Walker->setAutoResetWalker(false); |
Daniel Berlin | c43aa5a | 2016-08-02 16:24:03 +0000 | [diff] [blame] | 1353 | OptimizeUses(this, Walker, AA, DT).optimizeUses(); |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 1354 | Walker->setAutoResetWalker(true); |
| 1355 | Walker->resetClobberWalker(); |
| 1356 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1357 | // Mark the uses in unreachable blocks as live on entry, so that they go |
| 1358 | // somewhere. |
| 1359 | for (auto &BB : F) |
| 1360 | if (!Visited.count(&BB)) |
| 1361 | markUnreachableAsLiveOnEntry(&BB); |
Daniel Berlin | 16ed57c | 2016-06-27 18:22:27 +0000 | [diff] [blame] | 1362 | } |
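
// For reference, a sketch of the finished form on a tiny function, printed
// the way the annotated writer emits it (names and IDs illustrative):
//
//   define i32 @f(i32* %p) {
//     ; 1 = MemoryDef(liveOnEntry)
//     store i32 7, i32* %p
//     ; MemoryUse(1)
//     %v = load i32, i32* %p
//     ret i32 %v
//   }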
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1363 | |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 1364 | MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } |
| 1365 | |
| 1366 | MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() { |
Daniel Berlin | 16ed57c | 2016-06-27 18:22:27 +0000 | [diff] [blame] | 1367 | if (Walker) |
| 1368 | return Walker.get(); |
| 1369 | |
Eugene Zelenko | bb1b2d0 | 2017-08-16 22:07:40 +0000 | [diff] [blame] | 1370 | Walker = llvm::make_unique<CachingWalker>(this, AA, DT); |
Geoff Berry | b96d3b2 | 2016-06-01 21:30:40 +0000 | [diff] [blame] | 1371 | return Walker.get(); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1372 | } |
| 1373 | |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1374 | // This is a helper function used by the creation routines. It places NewAccess |
| 1375 | // into the access and defs lists for a given basic block, at the given |
| 1376 | // insertion point. |
| 1377 | void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess, |
| 1378 | const BasicBlock *BB, |
| 1379 | InsertionPlace Point) { |
| 1380 | auto *Accesses = getOrCreateAccessList(BB); |
| 1381 | if (Point == Beginning) { |
| 1382 | // If it's a phi node, it goes first, otherwise, it goes after any phi |
| 1383 | // nodes. |
| 1384 | if (isa<MemoryPhi>(NewAccess)) { |
| 1385 | Accesses->push_front(NewAccess); |
| 1386 | auto *Defs = getOrCreateDefsList(BB); |
| 1387 | Defs->push_front(*NewAccess); |
| 1388 | } else { |
| 1389 | auto AI = find_if_not( |
| 1390 | *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); |
| 1391 | Accesses->insert(AI, NewAccess); |
| 1392 | if (!isa<MemoryUse>(NewAccess)) { |
| 1393 | auto *Defs = getOrCreateDefsList(BB); |
| 1394 | auto DI = find_if_not( |
| 1395 | *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); |
| 1396 | Defs->insert(DI, *NewAccess); |
| 1397 | } |
| 1398 | } |
| 1399 | } else { |
| 1400 | Accesses->push_back(NewAccess); |
| 1401 | if (!isa<MemoryUse>(NewAccess)) { |
| 1402 | auto *Defs = getOrCreateDefsList(BB); |
| 1403 | Defs->push_back(*NewAccess); |
| 1404 | } |
| 1405 | } |
Daniel Berlin | 9d8a335 | 2017-01-30 11:35:39 +0000 | [diff] [blame] | 1406 | BlockNumberingValid.erase(BB); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1407 | } |
| 1408 | |
| 1409 | void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB, |
| 1410 | AccessList::iterator InsertPt) { |
| 1411 | auto *Accesses = getWritableBlockAccesses(BB); |
| 1412 | bool WasEnd = InsertPt == Accesses->end(); |
| 1413 | Accesses->insert(AccessList::iterator(InsertPt), What); |
| 1414 | if (!isa<MemoryUse>(What)) { |
| 1415 | auto *Defs = getOrCreateDefsList(BB); |
| 1416 | // If we got asked to insert at the end, we have an easy job, just shove it |
| 1417 | // at the end. If we got asked to insert before an existing def, we also get |
| 1418 |     // an iterator. If we got asked to insert before a use, we have to hunt for
| 1419 | // the next def. |
| 1420 | if (WasEnd) { |
| 1421 | Defs->push_back(*What); |
| 1422 | } else if (isa<MemoryDef>(InsertPt)) { |
| 1423 | Defs->insert(InsertPt->getDefsIterator(), *What); |
| 1424 | } else { |
| 1425 | while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt)) |
| 1426 | ++InsertPt; |
| 1427 | // Either we found a def, or we are inserting at the end |
| 1428 | if (InsertPt == Accesses->end()) |
| 1429 | Defs->push_back(*What); |
| 1430 | else |
| 1431 | Defs->insert(InsertPt->getDefsIterator(), *What); |
| 1432 | } |
| 1433 | } |
Daniel Berlin | 9d8a335 | 2017-01-30 11:35:39 +0000 | [diff] [blame] | 1434 | BlockNumberingValid.erase(BB); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1435 | } |
| 1436 | |
Daniel Berlin | 60ead05 | 2017-01-28 01:23:13 +0000 | [diff] [blame] | 1437 | // Move What before Where in the IR. The end result is that What will belong to
| 1438 | // the right lists and have the right Block set, but will not otherwise be |
| 1439 | // correct. It will not have the right defining access, and if it is a def, |
| 1440 | // things below it will not properly be updated. |
| 1441 | void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB, |
| 1442 | AccessList::iterator Where) { |
| 1443 | // Keep it in the lookup tables, remove from the lists |
| 1444 | removeFromLists(What, false); |
| 1445 | What->setBlock(BB); |
| 1446 | insertIntoListsBefore(What, BB, Where); |
| 1447 | } |
| 1448 | |
Daniel Berlin | 9d8a335 | 2017-01-30 11:35:39 +0000 | [diff] [blame] | 1449 | void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB, |
| 1450 | InsertionPlace Point) { |
| 1451 | removeFromLists(What, false); |
| 1452 | What->setBlock(BB); |
| 1453 | insertIntoListsForBlock(What, BB, Point); |
| 1454 | } |
| 1455 | |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1456 | MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) { |
| 1457 | assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB"); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1458 | MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++); |
Daniel Berlin | 9d8a335 | 2017-01-30 11:35:39 +0000 | [diff] [blame] | 1459 |   // Phis are always placed at the front of the block.
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1460 | insertIntoListsForBlock(Phi, BB, Beginning); |
Daniel Berlin | 5130cc8 | 2016-07-31 21:08:20 +0000 | [diff] [blame] | 1461 | ValueToMemoryAccess[BB] = Phi; |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1462 | return Phi; |
| 1463 | } |
| 1464 | |
| 1465 | MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I, |
| 1466 | MemoryAccess *Definition) { |
| 1467 | assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI"); |
| 1468 | MemoryUseOrDef *NewAccess = createNewAccess(I); |
| 1469 | assert( |
| 1470 | NewAccess != nullptr && |
| 1471 | "Tried to create a memory access for a non-memory touching instruction"); |
| 1472 | NewAccess->setDefiningAccess(Definition); |
| 1473 | return NewAccess; |
| 1474 | } |
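
// A minimal usage sketch (hypothetical caller; in-tree updates normally go
// through MemorySSAUpdater rather than calling this directly). Given a newly
// cloned store NewSI that should observe the same memory state as an
// existing access Acc:
//
//   MemoryUseOrDef *NewMA =
//       MSSA->createDefinedAccess(NewSI, Acc->getDefiningAccess());
//   // NewMA must still be placed into the block lists, e.g. via
//   // insertIntoListsForBlock(NewMA, NewSI->getParent(), MemorySSA::End).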
| 1475 | |
Daniel Berlin | d952cea | 2017-04-07 01:28:36 +0000 | [diff] [blame] | 1476 | // Return true if the instruction has ordering constraints. |
| 1477 | // Note specifically that this only considers stores and loads |
| 1478 | // because others are still considered ModRef by getModRefInfo. |
| 1479 | static inline bool isOrdered(const Instruction *I) { |
| 1480 | if (auto *SI = dyn_cast<StoreInst>(I)) { |
| 1481 | if (!SI->isUnordered()) |
| 1482 | return true; |
| 1483 | } else if (auto *LI = dyn_cast<LoadInst>(I)) { |
| 1484 | if (!LI->isUnordered()) |
| 1485 | return true; |
| 1486 | } |
| 1487 | return false; |
| 1488 | } |
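
// For example, a volatile load is not "unordered", so isOrdered returns true
// and the load is modeled as a def (illustrative annotation):
//
//   ; 1 = MemoryDef(liveOnEntry)
//   %v = load volatile i32, i32* %p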
Eugene Zelenko | bb1b2d0 | 2017-08-16 22:07:40 +0000 | [diff] [blame] | 1489 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1490 | /// \brief Helper function to create new memory accesses |
Peter Collingbourne | ffecb14 | 2016-05-26 01:19:17 +0000 | [diff] [blame] | 1491 | MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) { |
Peter Collingbourne | b9aa1f4 | 2016-05-26 04:58:46 +0000 | [diff] [blame] | 1492 | // The assume intrinsic has a control dependency which we model by claiming |
| 1493 | // that it writes arbitrarily. Ignore that fake memory dependency here. |
| 1494 | // FIXME: Replace this special casing with a more accurate modelling of |
| 1495 | // assume's control dependency. |
| 1496 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) |
| 1497 | if (II->getIntrinsicID() == Intrinsic::assume) |
| 1498 | return nullptr; |
| 1499 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1500 |   // Find out what effect this instruction has on memory.
Alina Sbirlea | 967e796 | 2017-08-01 00:28:29 +0000 | [diff] [blame] | 1501 | ModRefInfo ModRef = AA->getModRefInfo(I, None); |
Daniel Berlin | d952cea | 2017-04-07 01:28:36 +0000 | [diff] [blame] | 1502 | // The isOrdered check is used to ensure that volatiles end up as defs |
| 1503 | // (atomics end up as ModRef right now anyway). Until we separate the |
| 1504 | // ordering chain from the memory chain, this enables people to see at least |
| 1505 | // some relative ordering to volatiles. Note that getClobberingMemoryAccess |
| 1506 | // will still give an answer that bypasses other volatile loads. TODO: |
| 1507 | // Separate memory aliasing and ordering into two different chains so that we |
| 1508 | // can precisely represent both "what memory will this read/write/is clobbered |
| 1509 | // by" and "what instructions can I move this past". |
Alina Sbirlea | 63d2250 | 2017-12-05 20:12:23 +0000 | [diff] [blame] | 1510 | bool Def = isModSet(ModRef) || isOrdered(I); |
| 1511 | bool Use = isRefSet(ModRef); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1512 | |
| 1513 | // It's possible for an instruction to not modify memory at all. During |
| 1514 | // construction, we ignore them. |
Peter Collingbourne | ffecb14 | 2016-05-26 01:19:17 +0000 | [diff] [blame] | 1515 | if (!Def && !Use) |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1516 | return nullptr; |
| 1517 | |
| 1518 | assert((Def || Use) && |
| 1519 | "Trying to create a memory access with a non-memory instruction"); |
| 1520 | |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1521 | MemoryUseOrDef *MUD; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1522 | if (Def) |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1523 | MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1524 | else |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1525 | MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent()); |
Daniel Berlin | 5130cc8 | 2016-07-31 21:08:20 +0000 | [diff] [blame] | 1526 | ValueToMemoryAccess[I] = MUD; |
George Burgess IV | b42b762 | 2016-03-11 19:34:03 +0000 | [diff] [blame] | 1527 | return MUD; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1528 | } |
| 1529 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1530 | /// \brief Returns true if \p Replacer dominates \p Replacee . |
| 1531 | bool MemorySSA::dominatesUse(const MemoryAccess *Replacer, |
| 1532 | const MemoryAccess *Replacee) const { |
| 1533 | if (isa<MemoryUseOrDef>(Replacee)) |
| 1534 | return DT->dominates(Replacer->getBlock(), Replacee->getBlock()); |
| 1535 | const auto *MP = cast<MemoryPhi>(Replacee); |
| 1536 | // For a phi node, the use occurs in the predecessor block of the phi node. |
| 1537 |   // Since Replacee may occur multiple times in the phi node, we have to check
| 1538 |   // each operand to ensure Replacer dominates each operand where Replacee occurs.
| 1539 | for (const Use &Arg : MP->operands()) { |
George Burgess IV | b5a229f | 2016-02-02 23:15:26 +0000 | [diff] [blame] | 1540 | if (Arg.get() != Replacee && |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1541 | !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg))) |
| 1542 | return false; |
| 1543 | } |
| 1544 | return true; |
| 1545 | } |
| 1546 | |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 1547 | /// \brief Properly remove \p MA from all of MemorySSA's lookup tables. |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 1548 | void MemorySSA::removeFromLookups(MemoryAccess *MA) { |
| 1549 | assert(MA->use_empty() && |
| 1550 | "Trying to remove memory access that still has uses"); |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1551 | BlockNumbering.erase(MA); |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 1552 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) |
| 1553 | MUD->setDefiningAccess(nullptr); |
| 1554 | // Invalidate our walker's cache if necessary |
| 1555 | if (!isa<MemoryUse>(MA)) |
| 1556 | Walker->invalidateInfo(MA); |
| 1557 |   // The call below to erase will destroy MA, so we can't change the order in
| 1558 |   // which we do things here.
| 1559 | Value *MemoryInst; |
| 1560 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) { |
| 1561 | MemoryInst = MUD->getMemoryInst(); |
| 1562 | } else { |
| 1563 | MemoryInst = MA->getBlock(); |
| 1564 | } |
Daniel Berlin | 5130cc8 | 2016-07-31 21:08:20 +0000 | [diff] [blame] | 1565 | auto VMA = ValueToMemoryAccess.find(MemoryInst); |
| 1566 | if (VMA->second == MA) |
| 1567 | ValueToMemoryAccess.erase(VMA); |
Daniel Berlin | 60ead05 | 2017-01-28 01:23:13 +0000 | [diff] [blame] | 1568 | } |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 1569 | |
Daniel Berlin | 60ead05 | 2017-01-28 01:23:13 +0000 | [diff] [blame] | 1570 | /// \brief Properly remove \p MA from all of MemorySSA's lists. |
| 1571 | /// |
| 1572 | /// Because of the way the intrusive list and use lists work, it is important to |
| 1573 | /// do removal in the right order. |
| 1574 | /// ShouldDelete defaults to true, and will cause the memory access to also be |
| 1575 | /// deleted, not just removed. |
| 1576 | void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) { |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1577 | // The access list owns the reference, so we erase it from the non-owning list |
| 1578 | // first. |
| 1579 | if (!isa<MemoryUse>(MA)) { |
| 1580 | auto DefsIt = PerBlockDefs.find(MA->getBlock()); |
| 1581 | std::unique_ptr<DefsList> &Defs = DefsIt->second; |
| 1582 | Defs->remove(*MA); |
| 1583 | if (Defs->empty()) |
| 1584 | PerBlockDefs.erase(DefsIt); |
| 1585 | } |
| 1586 | |
Daniel Berlin | 60ead05 | 2017-01-28 01:23:13 +0000 | [diff] [blame] | 1587 | // The erase call here will delete it. If we don't want it deleted, we call |
| 1588 | // remove instead. |
George Burgess IV | e0e6e48 | 2016-03-02 02:35:04 +0000 | [diff] [blame] | 1589 | auto AccessIt = PerBlockAccesses.find(MA->getBlock()); |
Daniel Berlin | ada263d | 2016-06-20 20:21:33 +0000 | [diff] [blame] | 1590 | std::unique_ptr<AccessList> &Accesses = AccessIt->second; |
Daniel Berlin | 60ead05 | 2017-01-28 01:23:13 +0000 | [diff] [blame] | 1591 | if (ShouldDelete) |
| 1592 | Accesses->erase(MA); |
| 1593 | else |
| 1594 | Accesses->remove(MA); |
| 1595 | |
George Burgess IV | e0e6e48 | 2016-03-02 02:35:04 +0000 | [diff] [blame] | 1596 | if (Accesses->empty()) |
| 1597 | PerBlockAccesses.erase(AccessIt); |
Daniel Berlin | 83fc77b | 2016-03-01 18:46:54 +0000 | [diff] [blame] | 1598 | } |
| 1599 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1600 | void MemorySSA::print(raw_ostream &OS) const { |
| 1601 | MemorySSAAnnotatedWriter Writer(this); |
| 1602 | F.print(OS, &Writer); |
| 1603 | } |
| 1604 | |
Aaron Ballman | 615eb47 | 2017-10-15 14:32:27 +0000 | [diff] [blame] | 1605 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
Daniel Berlin | 78cbd28 | 2017-02-20 22:26:03 +0000 | [diff] [blame] | 1606 | LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); } |
Matthias Braun | 8c209aa | 2017-01-28 02:02:38 +0000 | [diff] [blame] | 1607 | #endif |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1608 | |
Daniel Berlin | 932b4cb | 2016-02-10 17:39:43 +0000 | [diff] [blame] | 1609 | void MemorySSA::verifyMemorySSA() const { |
| 1610 | verifyDefUses(F); |
| 1611 | verifyDomination(F); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1612 | verifyOrdering(F); |
Geoff Berry | cdf5333 | 2016-08-08 17:52:01 +0000 | [diff] [blame] | 1613 | Walker->verify(this); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1614 | } |
| 1615 | |
| 1616 | /// \brief Verify that the order and existence of MemoryAccesses matches the |
| 1617 | /// order and existence of memory affecting instructions. |
| 1618 | void MemorySSA::verifyOrdering(Function &F) const { |
| 1619 | // Walk all the blocks, comparing what the lookups think and what the access |
| 1620 | // lists think, as well as the order in the blocks vs the order in the access |
| 1621 | // lists. |
| 1622 | SmallVector<MemoryAccess *, 32> ActualAccesses; |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1623 | SmallVector<MemoryAccess *, 32> ActualDefs; |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1624 | for (BasicBlock &B : F) { |
| 1625 | const AccessList *AL = getBlockAccesses(&B); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1626 | const auto *DL = getBlockDefs(&B); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1627 | MemoryAccess *Phi = getMemoryAccess(&B); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1628 | if (Phi) { |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1629 | ActualAccesses.push_back(Phi); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1630 | ActualDefs.push_back(Phi); |
| 1631 | } |
| 1632 | |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1633 | for (Instruction &I : B) { |
| 1634 | MemoryAccess *MA = getMemoryAccess(&I); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1635 | assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) && |
| 1636 | "We have memory affecting instructions " |
| 1637 | "in this block but they are not in the " |
| 1638 | "access list or defs list"); |
| 1639 | if (MA) { |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1640 | ActualAccesses.push_back(MA); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1641 | if (isa<MemoryDef>(MA)) |
| 1642 | ActualDefs.push_back(MA); |
| 1643 | } |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1644 | } |
| 1645 | // Either we hit the assert, really have no accesses, or we have both |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1646 | // accesses and an access list. |
| 1647 | // Same with defs. |
| 1648 | if (!AL && !DL) |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1649 | continue; |
| 1650 | assert(AL->size() == ActualAccesses.size() && |
| 1651 | "We don't have the same number of accesses in the block as on the " |
| 1652 | "access list"); |
Davide Italiano | 6c77de0 | 2017-01-30 03:16:43 +0000 | [diff] [blame] | 1653 | assert((DL || ActualDefs.size() == 0) && |
| 1654 | "Either we should have a defs list, or we should have no defs"); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1655 | assert((!DL || DL->size() == ActualDefs.size()) && |
| 1656 | "We don't have the same number of defs in the block as on the " |
| 1657 | "def list"); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1658 | auto ALI = AL->begin(); |
| 1659 | auto AAI = ActualAccesses.begin(); |
| 1660 | while (ALI != AL->end() && AAI != ActualAccesses.end()) { |
| 1661 | assert(&*ALI == *AAI && "Not the same accesses in the same order"); |
| 1662 | ++ALI; |
| 1663 | ++AAI; |
| 1664 | } |
| 1665 | ActualAccesses.clear(); |
Daniel Berlin | d602e04 | 2017-01-25 20:56:19 +0000 | [diff] [blame] | 1666 | if (DL) { |
| 1667 | auto DLI = DL->begin(); |
| 1668 | auto ADI = ActualDefs.begin(); |
| 1669 | while (DLI != DL->end() && ADI != ActualDefs.end()) { |
| 1670 | assert(&*DLI == *ADI && "Not the same defs in the same order"); |
| 1671 | ++DLI; |
| 1672 | ++ADI; |
| 1673 | } |
| 1674 | } |
| 1675 | ActualDefs.clear(); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1676 | } |
Daniel Berlin | 932b4cb | 2016-02-10 17:39:43 +0000 | [diff] [blame] | 1677 | } |
| 1678 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1679 | /// \brief Verify the domination properties of MemorySSA by checking that each |
| 1680 | /// definition dominates all of its uses. |
Daniel Berlin | 932b4cb | 2016-02-10 17:39:43 +0000 | [diff] [blame] | 1681 | void MemorySSA::verifyDomination(Function &F) const { |
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1682 | #ifndef NDEBUG |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1683 | for (BasicBlock &B : F) { |
| 1684 | // Phi nodes are attached to basic blocks |
Daniel Berlin | 2919b1c | 2016-08-05 21:46:52 +0000 | [diff] [blame] | 1685 | if (MemoryPhi *MP = getMemoryAccess(&B)) |
| 1686 | for (const Use &U : MP->uses()) |
| 1687 |         assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1688 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1689 | for (Instruction &I : B) { |
| 1690 | MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I)); |
| 1691 | if (!MD) |
| 1692 | continue; |
| 1693 | |
Daniel Berlin | 2919b1c | 2016-08-05 21:46:52 +0000 | [diff] [blame] | 1694 | for (const Use &U : MD->uses()) |
| 1695 |         assert(dominates(MD, U) && "Memory Def does not dominate its uses");
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1696 | } |
| 1697 | } |
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1698 | #endif |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1699 | } |
| 1700 | |
| 1701 | /// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use |
| 1702 | /// appears in the use list of \p Def. |
Daniel Berlin | 932b4cb | 2016-02-10 17:39:43 +0000 | [diff] [blame] | 1703 | void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { |
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1704 | #ifndef NDEBUG |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1705 | // The live on entry use may cause us to get a NULL def here |
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1706 | if (!Def) |
| 1707 | assert(isLiveOnEntryDef(Use) && |
| 1708 |            "Null def but use does not point to the live on entry def");
| 1709 | else |
Daniel Berlin | da2f38e | 2016-08-11 21:26:50 +0000 | [diff] [blame] | 1710 | assert(is_contained(Def->users(), Use) && |
Daniel Berlin | 7af9587 | 2016-08-05 21:47:20 +0000 | [diff] [blame] | 1711 | "Did not find use in def's use list"); |
| 1712 | #endif |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1713 | } |
| 1714 | |
| 1715 | /// \brief Verify the immediate use information, by walking all the memory |
| 1716 | /// accesses and verifying that, for each use, it appears in the |
| 1717 | /// appropriate def's use list |
Daniel Berlin | 932b4cb | 2016-02-10 17:39:43 +0000 | [diff] [blame] | 1718 | void MemorySSA::verifyDefUses(Function &F) const { |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1719 | for (BasicBlock &B : F) { |
| 1720 | // Phi nodes are attached to basic blocks |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1721 | if (MemoryPhi *Phi = getMemoryAccess(&B)) { |
David Majnemer | 580e754 | 2016-06-25 00:04:06 +0000 | [diff] [blame] | 1722 | assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance( |
| 1723 | pred_begin(&B), pred_end(&B))) && |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1724 | "Incomplete MemoryPhi Node"); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1725 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) |
| 1726 | verifyUseInDefs(Phi->getIncomingValue(I), Phi); |
Daniel Berlin | 1430026 | 2016-06-21 18:39:20 +0000 | [diff] [blame] | 1727 | } |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1728 | |
| 1729 | for (Instruction &I : B) { |
George Burgess IV | 66837ab | 2016-11-01 21:17:46 +0000 | [diff] [blame] | 1730 | if (MemoryUseOrDef *MA = getMemoryAccess(&I)) { |
| 1731 | verifyUseInDefs(MA->getDefiningAccess(), MA); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1732 | } |
| 1733 | } |
| 1734 | } |
| 1735 | } |
| 1736 | |
George Burgess IV | 66837ab | 2016-11-01 21:17:46 +0000 | [diff] [blame] | 1737 | MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const { |
| 1738 | return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I)); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1739 | } |
| 1740 | |
| 1741 | MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const { |
George Burgess IV | 66837ab | 2016-11-01 21:17:46 +0000 | [diff] [blame] | 1742 | return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB))); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1743 | } |
| 1744 | |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1745 | /// Perform a local numbering on blocks so that instruction ordering can be |
| 1746 | /// determined in constant time. |
| 1747 | /// TODO: We currently just number in order. If we numbered by N, we could |
| 1748 | /// allow at least N-1 sequences of insertBefore or insertAfter (and at least |
| 1749 | /// log2(N) sequences of mixed before and after) without needing to invalidate |
| 1750 | /// the numbering. |
| 1751 | void MemorySSA::renumberBlock(const BasicBlock *B) const { |
| 1752 | // The pre-increment ensures the numbers really start at 1. |
| 1753 | unsigned long CurrentNumber = 0; |
| 1754 | const AccessList *AL = getBlockAccesses(B); |
| 1755 | assert(AL != nullptr && "Asking to renumber an empty block"); |
| 1756 | for (const auto &I : *AL) |
| 1757 | BlockNumbering[&I] = ++CurrentNumber; |
| 1758 | BlockNumberingValid.insert(B); |
| 1759 | } |
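
// Sketch of the TODO above (assumed numbers): with the current scheme a block
// is numbered 1, 2, 3, ..., so any insertion invalidates the numbering. If we
// numbered by 10 instead (10, 20, 30, ...), an access inserted between two
// neighbors could take an unused number such as 15, so the numbering would
// survive several insertions before a renumber is needed.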
| 1760 | |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1761 | /// \brief Determine, for two memory accesses in the same block, |
| 1762 | /// whether \p Dominator dominates \p Dominatee. |
| 1763 | /// \returns True if \p Dominator dominates \p Dominatee. |
| 1764 | bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, |
| 1765 | const MemoryAccess *Dominatee) const { |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1766 | const BasicBlock *DominatorBlock = Dominator->getBlock(); |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1767 | |
Daniel Berlin | 1986030 | 2016-07-19 23:08:08 +0000 | [diff] [blame] | 1768 | assert((DominatorBlock == Dominatee->getBlock()) && |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1769 | "Asking for local domination when accesses are in different blocks!"); |
Sebastian Pop | e1f60b1 | 2016-06-10 21:36:41 +0000 | [diff] [blame] | 1770 | // A node dominates itself. |
| 1771 | if (Dominatee == Dominator) |
| 1772 | return true; |
| 1773 | |
| 1774 | // When Dominatee is defined on function entry, it is not dominated by another |
| 1775 | // memory access. |
| 1776 | if (isLiveOnEntryDef(Dominatee)) |
| 1777 | return false; |
| 1778 | |
| 1779 | // When Dominator is defined on function entry, it dominates the other memory |
| 1780 | // access. |
| 1781 | if (isLiveOnEntryDef(Dominator)) |
| 1782 | return true; |
| 1783 | |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1784 | if (!BlockNumberingValid.count(DominatorBlock)) |
| 1785 | renumberBlock(DominatorBlock); |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1786 | |
Daniel Berlin | 5c46b94 | 2016-07-19 22:49:43 +0000 | [diff] [blame] | 1787 | unsigned long DominatorNum = BlockNumbering.lookup(Dominator); |
| 1788 |   // All numbers start at 1.
| 1789 | assert(DominatorNum != 0 && "Block was not numbered properly"); |
| 1790 | unsigned long DominateeNum = BlockNumbering.lookup(Dominatee); |
| 1791 | assert(DominateeNum != 0 && "Block was not numbered properly"); |
| 1792 | return DominatorNum < DominateeNum; |
George Burgess IV | e1100f5 | 2016-02-02 22:46:49 +0000 | [diff] [blame] | 1793 | } |
| 1794 | |
George Burgess IV | 5f30897 | 2016-07-19 01:29:15 +0000 | [diff] [blame] | 1795 | bool MemorySSA::dominates(const MemoryAccess *Dominator, |
| 1796 | const MemoryAccess *Dominatee) const { |
| 1797 | if (Dominator == Dominatee) |
| 1798 | return true; |
| 1799 | |
| 1800 | if (isLiveOnEntryDef(Dominatee)) |
| 1801 | return false; |
| 1802 | |
| 1803 | if (Dominator->getBlock() != Dominatee->getBlock()) |
| 1804 | return DT->dominates(Dominator->getBlock(), Dominatee->getBlock()); |
| 1805 | return locallyDominates(Dominator, Dominatee); |
| 1806 | } |
| 1807 | |
Daniel Berlin | 2919b1c | 2016-08-05 21:46:52 +0000 | [diff] [blame] | 1808 | bool MemorySSA::dominates(const MemoryAccess *Dominator, |
| 1809 | const Use &Dominatee) const { |
| 1810 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) { |
| 1811 | BasicBlock *UseBB = MP->getIncomingBlock(Dominatee); |
| 1812 | // The def must dominate the incoming block of the phi. |
| 1813 | if (UseBB != Dominator->getBlock()) |
| 1814 | return DT->dominates(Dominator->getBlock(), UseBB); |
| 1815 | // If the UseBB and the DefBB are the same, compare locally. |
| 1816 | return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee)); |
| 1817 | } |
| 1818 | // If it's not a PHI node use, the normal dominates can already handle it. |
| 1819 | return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser())); |
| 1820 | } |

const static char LiveOnEntryStr[] = "liveOnEntry";

void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  OS << getID() << " = MemoryDef(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';
}

void MemoryPhi::print(raw_ostream &OS) const {
  bool First = true;
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    if (!First)
      OS << ',';
    else
      First = false;

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';
}
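
// For illustration, the printed forms produced by the three methods above look
// like this (IDs and block names invented):
//
//   2 = MemoryDef(liveOnEntry)
//   5 = MemoryPhi({entry,2},{if.then,4})
//   MemoryUse(5)
//
// Defs and phis print their own ID; uses print only their defining access.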

void MemoryAccess::dump() const {
// Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}

char MemorySSAPrinterLegacyPass::ID = 0;

MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MemorySSAWrapperPass>();
}

bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  MSSA.print(dbgs());
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}
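
// The printer is normally driven from the command line; assuming the legacy
// pass is registered under the name "print-memoryssa", an invocation looks
// like:
//
//   opt -print-memoryssa -disable-output input.ll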

AnalysisKey MemorySSAAnalysis::Key;

MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
}
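
// A new-pass-manager client requests the analysis like any other; a minimal
// sketch (`MyPass` is hypothetical):
//
// \code
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//     // ... use MSSA ...
//     return PreservedAnalyses::all();
//   }
// \endcode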

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();

  return PreservedAnalyses::all();
}

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}

void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}
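
// A legacy-pass-manager client declares a dependency on the wrapper and pulls
// MemorySSA out of it; a minimal sketch (`MyLegacyPass` is hypothetical):
//
// \code
//   void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MemorySSAWrapperPass>();
//   }
//
//   bool MyLegacyPass::runOnFunction(Function &F) {
//     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
//     // ... use MSSA ...
//     return false;
//   }
// \endcode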

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
                                        DominatorTree *D)
    : MemorySSAWalker(M), Walker(*M, *A, *D) {}

void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->resetOptimized();
}

/// \brief Walk the use-def chains starting at \p MA and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
  MemoryAccess *New = Walker.findClobber(StartingAccess, Q);
#ifdef EXPENSIVE_CHECKS
  MemoryAccess *NewNoCache = Walker.findClobber(StartingAccess, Q);
  assert(NewNoCache == New && "Cache made us hand back a different result?");
  (void)NewNoCache;
#endif
  if (AutoResetWalker)
    resetClobberWalker();
  return New;
}

MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if we
  // hit a fence.
  if (!ImmutableCallSite(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = I;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we are
  // handed something we already believe is the clobbering access.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
  DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *StartingUseOrDef << "\n");
  DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}
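
// The location-based overload lets a client ask what clobbers an arbitrary
// MemoryLocation starting from a given access; a minimal sketch (`LI` is a
// hypothetical LoadInst, `Walker` comes from MSSA.getWalker()):
//
// \code
//   MemoryAccess *Start = MSSA.getMemoryAccess(LI);
//   MemoryLocation Loc = MemoryLocation::get(LI);
//   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(Start, Loc);
// \endcode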

MemoryAccess *
MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we do not store the optimized def result because we'd need
  // a separate field, since we can't use it as the defining access.
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
    if (MUD->isOptimized())
      return MUD->getOptimized();

  const Instruction *I = StartingAccess->getMemoryInst();
  UpwardsMemoryQuery Q(I, StartingAccess);
  // We can't sanely do anything with fences; they conservatively clobber all
  // memory and have no locations to get pointers from to try to disambiguate.
  if (!Q.IsCall && I->isFenceLike())
    return StartingAccess;

  if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
      MUD->setOptimized(LiveOnEntry);
    return LiveOnEntry;
  }

  // Start with the thing we already think clobbers this location.
  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

  // At this point, DefiningAccess may be the live on entry def.
  // If it is, we will not get a better result.
  if (MSSA->isLiveOnEntryDef(DefiningAccess))
    return DefiningAccess;

  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
  DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *DefiningAccess << "\n");
  DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *Result << "\n");
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
    MUD->setOptimized(Result);

  return Result;
}
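
// The access-based overload above is the common entry point; a minimal sketch
// (`SI` is a hypothetical StoreInst):
//
// \code
//   MemorySSAWalker *Walker = MSSA.getWalker();
//   MemoryAccess *Clobber =
//       Walker->getClobberingMemoryAccess(MSSA.getMemoryAccess(SI));
//   if (MSSA.isLiveOnEntryDef(Clobber)) {
//     // No access in the function clobbers SI's location before SI.
//   }
// \endcode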

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}

void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}

void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}

void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}
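
// Design note: MemoryPhi, MemoryDef, and MemoryUse derive from DerivedUser,
// which destroys itself through a stored callback rather than a virtual
// destructor; the deleteMe functions above supply that callback so deletion
// dispatches to the correct most-derived type.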