//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));
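
// Illustrative usage (a sketch, not an exhaustive list of clients): passing
// -verify-memoryssa to `opt` sets VerifyMemorySSA above, and passes that
// build or update MemorySSA call verifyMemorySSA() when it is set.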

namespace llvm {

/// An assembly annotator class to print Memory SSA information in
/// comments.
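///
/// For example (illustrative), the annotated IR interleaves accesses with
/// instructions roughly like so:
///
///   ; 1 = MemoryDef(liveOnEntry)
///   store i8 42, i8* %p
///   ; MemoryUse(1)
///   %v = load i8, i8* %p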
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

} // end namespace llvm

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a DenseMap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a memory location for a fence inst, and it
      // is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledValue() != Other.Call->getCalledValue())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
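  // Tagged union, discriminated by IsCall above: Call is active for calls and
  // Loc for everything else. For fences, which have no MemoryLocation, Loc is
  // simply left unset (see the Instruction constructor above).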
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledValue()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
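
// For example (illustrative): two monotonic loads of the same address may be
// freely reordered, but no load may be hoisted above an acquire load, and a
// seq_cst load may never be hoisted above another load.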

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  const auto *UseCall = dyn_cast<CallBase>(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCall)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR != NoAlias, AR};
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return {false, NoAlias};
    default:
      break;
    }
  }

  if (UseCall) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}
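
// For example (illustrative): a MemoryDef for `store i32 0, i32* %p` clobbers
// a later `load i32, i32* %p` (getModRefInfo reports a Mod with the Must bit
// set, so AR is MustAlias), while two ordinary non-volatile loads fall into
// the areLoadsReorderable case above and do not clobber each other.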

template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           BatchAAResults &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
      return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
    default:
      return false;
    }
  }
  return false;
}

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(MemoryLocation(
                                  cast<LoadInst>(I)->getPointerOperand())));
}
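
// For example (illustrative): a load marked with !invariant.load metadata, or
// a load of a constant global, can never be clobbered, so its defining access
// can be set to liveOnEntry without any walking.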

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start     The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc  The MemoryLocation for Start.
/// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query     The UpwardsMemoryQuery we used for our search.
/// \param AA        The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      Worklist.append(
          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
          upward_defs_end());
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
template <class AliasAnalysisType> class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysisType &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    // If the limit is already exhausted, bump it back up to one: the loop
    // below will then return at the first MemoryDef it sees, without making
    // any alias() calls.
    if (*UpwardWalkLimit == 0)
      (*UpwardWalkLimit)++;

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};

        if (!--*UpwardWalkLimit)
          return {Current, true, MayAlias};

        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
                                 upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() {}
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
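  ///
  /// Illustrative sketch: if Start sits below a diamond whose two arms are
  /// clobber-free for Loc and which is dominated by a store S to Loc, every
  /// path from the starting phi walks up to S, so the phi optimizes to S. If
  /// one arm instead contains its own clobbering store, getBlockingAccess
  /// reports it and the phi cannot be optimized past.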
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  AliasAnalysisType *getAA() { return &AA; }
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
                            unsigned &UpWalkLimit) {
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }
};
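
// Illustrative use of the walker (a sketch only; in this file the real entry
// points are the MemorySSAWalker classes below, which reach findClobber via
// ClobberWalkerBase::getClobberingMemoryAccessBase):
//   MemoryUseOrDef *MA = MSSA.getMemoryAccess(I);
//   UpwardsMemoryQuery Q(I, MA);
//   unsigned Limit = MaxCheckLimit;
//   MemoryAccess *Clobber = Walker.findClobber(MA, Q, Limit);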

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
  ClobberWalker<AliasAnalysisType> Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              unsigned &);
  // The third argument (bool) defines whether the clobber search should skip
  // the original queried access. If true, there will be a follow-up query
  // searching for a clobber access past "self". Note that the Optimized access
  // is not updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
template <class AliasAnalysisType>
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

template <class AliasAnalysisType>
class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};
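
// Note: CachingWalker and SkipSelfWalker above differ only in the SkipSelf
// bool they pass to getClobberingMemoryAccessBase: CachingWalker passes false
// (the ordinary query), while SkipSelfWalker passes true to search for a
// clobber strictly past the original access itself.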
1063
Eugene Zelenkobb1b2d02017-08-16 22:07:40 +00001064} // end namespace llvm
1065
void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      int PhiIndex = Phi->getBasicBlockIndex(BB);
      assert(PhiIndex != -1 && "Incomplete phi during partial rename");
      Phi->setIncomingValue(PhiIndex, IncomingVal);
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}
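
// Illustration (hypothetical IR, not from this file): renaming a block whose
// access list is [Use, Def, Use] with an incoming value of liveOnEntry yields,
// in the printed form used by the writers in this file, something like:
//
//   ; MemoryUse(liveOnEntry)
//   %a = load i8, i8* %p
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, i8* %p
//   ; MemoryUse(1)
//   %b = load i8, i8* %p
//
// The MemoryDef (1) becomes the incoming value returned to the caller.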

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
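
// A minimal sketch of the walk, assuming a hypothetical diamond CFG
// entry -> {left, right} -> merge, where only left contains a MemoryDef:
// the preorder walk carries entry's incoming value into both arms, and
// renameSuccessorPhis fills merge's MemoryPhi from each predecessor, e.g.
//
//   merge: 2 = MemoryPhi({left,1},{right,liveOnEntry})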

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccesses as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // Point uses and defs at live on entry; if we have a phi, just remove it,
    // since we are going to replace all of its users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA, and it can
  // significantly reduce the time the compiler spends in AA, because we will
  // make queries about all the instructions in the Function.
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use the non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}
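
// A sketch of the batching contract assumed above (illustrative only, not
// code from this file): every query made during construction goes through a
// single BatchAAResults, which is free to cache state across calls, roughly:
//
//   BatchAAResults BatchAA(*AA);
//   for (Instruction &I : instructions(F))
//     (void)BatchAA.getModRefInfo(&I, None);
//
// This is only valid because no instruction is added, removed, or moved
// between the first and the last query.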

MemorySSA::~MemorySSA() {
  // Drop all our references.
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUses in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than the regular walker,
/// which walks bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
               BatchAAResults *BAA, DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially keeps track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which
    // DenseMap does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker<BatchAAResults> *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};

} // end namespace llvm

/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track, for each MemoryLocation, the stack positions it still needs to
/// check and where its last walk ended, because we only want to check the
/// things that have changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness
/// or things like this, and if it starts to, we can modify MemoryLocOrCall to
/// include the relevant data).
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  // If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack, and
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from the top of
    // the stack due to changing blocks. We may have to reset the lower bound
    // or last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees. To get the optimal number of queries, we
      // would need to make LowerBound and LastKill a per-loc stack, and pop it
      // until the top of that stack dominates us. This does not seem worth it
      // ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, and go there.
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
        // We are guaranteed to find it, or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack.
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber, or the lower
    // bound. PHI walking may cause it to be < LowerBound, and in fact, <
    // LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We are now last killed by whatever we got to.
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}
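
// Version-stack sketch (illustrative numbers): during the dominator walk the
// stack might look like [liveOnEntry, 1 = Def, 2 = Phi, 3 = Def]. A MemoryUse
// whose location already checked entries up to LowerBound == 1 only has to
// test indices 2 and 3; if neither clobbers it, the use can fall back to its
// cached LastKill instead of re-walking the whole stack.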

/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}

void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhis should go.
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}
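
// Illustrative example (hypothetical CFG): if blocks A and B both contain
// MemoryDefs and both branch to C, then C is in the iterated dominance
// frontier of the defining blocks, so a MemoryPhi merging the two defs is
// created at the start of C.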
1476
Alina Sbirleabfc779e2019-03-22 17:22:19 +00001477void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
George Burgess IVe1100f52016-02-02 22:46:49 +00001478 // We create an access to represent "live on entry", for things like
1479 // arguments or users of globals, where the memory they use is defined before
1480 // the beginning of the function. We do not actually insert it into the IR.
1481 // We do not define a live on exit for the immediate uses, and thus our
1482 // semantics do *not* imply that something with no immediate uses can simply
1483 // be removed.
1484 BasicBlock &StartingPoint = F.getEntryBlock();
George Burgess IV612cf212018-02-27 06:43:19 +00001485 LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1486 &StartingPoint, NextID++));
George Burgess IVe1100f52016-02-02 22:46:49 +00001487
1488 // We maintain lists of memory accesses per-block, trading memory for time. We
1489 // could just look up the memory access for every possible instruction in the
1490 // stream.
1491 SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
George Burgess IVe1100f52016-02-02 22:46:49 +00001492 // Go through each block, figure out where defs occur, and chain together all
1493 // the accesses.
1494 for (BasicBlock &B : F) {
Daniel Berlin7898ca62016-02-07 01:52:15 +00001495 bool InsertIntoDef = false;
Daniel Berlinada263d2016-06-20 20:21:33 +00001496 AccessList *Accesses = nullptr;
Daniel Berlind602e042017-01-25 20:56:19 +00001497 DefsList *Defs = nullptr;
George Burgess IVe1100f52016-02-02 22:46:49 +00001498 for (Instruction &I : B) {
Alina Sbirleabfc779e2019-03-22 17:22:19 +00001499 MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
George Burgess IVb42b7622016-03-11 19:34:03 +00001500 if (!MUD)
George Burgess IVe1100f52016-02-02 22:46:49 +00001501 continue;
Daniel Berlin1b51a292016-02-07 01:52:19 +00001502
George Burgess IVe1100f52016-02-02 22:46:49 +00001503 if (!Accesses)
1504 Accesses = getOrCreateAccessList(&B);
George Burgess IVb42b7622016-03-11 19:34:03 +00001505 Accesses->push_back(MUD);
Daniel Berlind602e042017-01-25 20:56:19 +00001506 if (isa<MemoryDef>(MUD)) {
1507 InsertIntoDef = true;
1508 if (!Defs)
1509 Defs = getOrCreateDefsList(&B);
1510 Defs->push_back(*MUD);
1511 }
George Burgess IVe1100f52016-02-02 22:46:49 +00001512 }
Daniel Berlin7898ca62016-02-07 01:52:15 +00001513 if (InsertIntoDef)
1514 DefiningBlocks.insert(&B);
Daniel Berlin1b51a292016-02-07 01:52:19 +00001515 }
Michael Zolotukhin67cfbaa2018-05-15 18:40:29 +00001516 placePHINodes(DefiningBlocks);
George Burgess IVe1100f52016-02-02 22:46:49 +00001517
1518 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1519 // filled in with all blocks.
1520 SmallPtrSet<BasicBlock *, 16> Visited;
1521 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1522
Alina Sbirleabfc779e2019-03-22 17:22:19 +00001523 ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
1524 CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
1525 OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
George Burgess IV5f308972016-07-19 01:29:15 +00001526
George Burgess IVe1100f52016-02-02 22:46:49 +00001527 // Mark the uses in unreachable blocks as live on entry, so that they go
1528 // somewhere.
1529 for (auto &BB : F)
1530 if (!Visited.count(&BB))
1531 markUnreachableAsLiveOnEntry(&BB);
Daniel Berlin16ed57c2016-06-27 18:22:27 +00001532}
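
// For illustration (hypothetical IR, in the annotated form printed by this
// file): after building, a small function may look like
//
//   entry:
//     ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, i32* %p
//     br i1 %c, label %then, label %exit
//   then:
//     ; 2 = MemoryDef(1)
//     store i32 1, i32* %p
//     br label %exit
//   exit:
//     ; 3 = MemoryPhi({entry,1},{then,2})
//     ; MemoryUse(3)
//     %v = load i32, i32* %p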

MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }

MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase =
        llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);

  Walker =
      llvm::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
  return Walker.get();
}

MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase =
        llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);

  SkipWalker =
      llvm::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
  return SkipWalker.get();
}

// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first; otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}
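
// Placement sketch (illustrative): asked to insert a MemoryDef before a
// MemoryUse whose block's access list is [Phi, Use, Def2], we scan forward
// past the Use to find Def2 and insert the new def before Def2 in the defs
// list; if no def follows, the new def simply goes at the end of the defs
// list.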

void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, but remove it from the lists.
  removeFromLists(What, false);

  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

// Move What before Where in the IR. The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not be properly updated.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi to the beginning of the block");
    // Update the lookup table entry.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}

MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
  assert(
      NewAccess != nullptr &&
      "Tried to create a memory access for a non-memory touching instruction");
  NewAccess->setDefiningAccess(Definition);
  return NewAccess;
}

// Return true if the instruction has ordering constraints.
// Note specifically that this only considers stores and loads,
// because others are still considered ModRef by getModRefInfo.
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}
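
// Example of the distinction (illustrative IR): a plain load is a MemoryUse,
// but
//
//   %v = load atomic i32, i32* %p seq_cst, align 4
//
// is not unordered, so isOrdered() returns true and createNewAccess models it
// as a MemoryDef even though it does not write memory.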

/// Helper function to create new memory accesses.
template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           AliasAnalysisType *AAP,
                                           const MemoryUseOrDef *Template) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Ignore that fake memory dependency here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::assume)
      return nullptr;

  bool Def, Use;
  if (Template) {
    Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
    Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
#if !defined(NDEBUG)
    ModRefInfo ModRef = AAP->getModRefInfo(I, None);
    bool DefCheck, UseCheck;
    DefCheck = isModSet(ModRef) || isOrdered(I);
    UseCheck = isRefSet(ModRef);
    assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
#endif
  } else {
    // Find out what effect this instruction has on memory.
    ModRefInfo ModRef = AAP->getModRefInfo(I, None);
    // The isOrdered check is used to ensure that volatiles end up as defs
    // (atomics end up as ModRef right now anyway). Until we separate the
    // ordering chain from the memory chain, this enables people to see at
    // least some relative ordering to volatiles. Note that
    // getClobberingMemoryAccess will still give an answer that bypasses other
    // volatile loads. TODO: Separate memory aliasing and ordering into two
    // different chains so that we can precisely represent both "what memory
    // will this read/write/is clobbered by" and "what instructions can I move
    // this past".
    Def = isModSet(ModRef) || isOrdered(I);
    Use = isRefSet(ModRef);
  }

  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore such instructions.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}
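
// Classification sketch (illustrative): given
//
//   store i32 0, i32* %p    ; Mod             -> MemoryDef
//   %x = load i32, i32* %p  ; Ref             -> MemoryUse
//   call void @f()          ; possibly ModRef -> MemoryDef
//
// an instruction that neither reads nor writes memory gets no access at all,
// and a modifying access subsumes any read the instruction also performs.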

/// Returns true if \p Replacer dominates \p Replacee.
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
                             const MemoryAccess *Replacee) const {
  if (isa<MemoryUseOrDef>(Replacee))
    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
  const auto *MP = cast<MemoryPhi>(Replacee);
  // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since we may occur multiple times in the phi node, we have to check each
  // operand to ensure Replacer dominates each operand where Replacee occurs.
  for (const Use &Arg : MP->operands()) {
    if (Arg.get() != Replacee &&
        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
      return false;
  }
  return true;
}

/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary.
  if (!isa<MemoryUse>(MA))
    getWalker()->invalidateInfo(MA);

  Value *MemoryInst;
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MemoryInst = MUD->getMemoryInst();
  else
    MemoryInst = MA->getBlock();

  auto VMA = ValueToMemoryAccess.find(MemoryInst);
  if (VMA->second == MA)
    ValueToMemoryAccess.erase(VMA);
}

/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important to
/// do removal in the right order.
/// ShouldDelete defaults to true, and will cause the memory access to also be
/// deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning list
  // first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
  verifyDominationNumbers(F);
  // Previously, the verification also checked that the clobbering access
  // cached by MemorySSA is the same as the clobbering access found by a later
  // query to AA. This does not hold true in general due to the current
  // fragility of BasicAA, which has arbitrary caps on the things it analyzes
  // before giving up. As a result, transformations that are correct will lead
  // to BasicAA returning different alias answers before and after the
  // transformation. Invalidating MemorySSA is not an option, because the
  // results in BasicAA can be so random that, in the worst case, we'd need to
  // rebuild MemorySSA from scratch after every transformation, which defeats
  // the purpose of using it. For such an example, see test4 added in D51960.
}

/// Verify that all of the blocks we believe to have valid domination numbers
/// actually have valid domination numbers.
void MemorySSA::verifyDominationNumbers(const Function &F) const {
#ifndef NDEBUG
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
  for (const BasicBlock &BB : F) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // It's correct to say an empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
#endif
}

/// Verify that the order and existence of MemoryAccesses matches the
/// order and existence of memory-affecting instructions.
void MemorySSA::verifyOrdering(Function &F) const {
#ifndef NDEBUG
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryAccess *Phi = getMemoryAccess(&B);
    if (Phi) {
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
    }

    for (Instruction &I : B) {
      MemoryAccess *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        ActualAccesses.push_back(MA);
        if (isa<MemoryDef>(MA))
          ActualDefs.push_back(MA);
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list. Same with defs.
    if (!AL && !DL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
#endif
}

/// Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *MP = getMemoryAccess(&B))
      for (const Use &U : MP->uses())
        assert(dominates(MP, U) && "Memory PHI does not dominate its uses");

    for (Instruction &I : B) {
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (const Use &U : MD->uses())
        assert(dominates(MD, U) && "Memory Def does not dominate its uses");
    }
  }
#endif
}

/// Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#ifndef NDEBUG
  // The live on entry use may cause us to get a NULL def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to the live on entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
#endif
}

/// Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list.
void MemorySSA::verifyDefUses(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *Phi = getMemoryAccess(&B)) {
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
        assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
                   pred_end(&B) &&
               "Incoming phi block not a block predecessor");
      }
    }

    for (Instruction &I : B) {
      if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
        verifyUseInDefs(MA->getDefiningAccess(), MA);
      }
    }
  }
#endif
}

/// Perform a local numbering on blocks so that instruction ordering can be
/// determined in constant time.
/// TODO: We currently just number in order. If we numbered by N, we could
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
/// log2(N) sequences of mixed before and after) without needing to invalidate
/// the numbering.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}
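
// Numbering sketch (illustrative): a block whose access list is
// [Phi, Def, Use] is numbered {Phi: 1, Def: 2, Use: 3}, after which
// locallyDominates can answer in-block dominance queries with a single
// integer comparison.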

/// Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {
  const BasicBlock *DominatorBlock = Dominator->getBlock();

  assert((DominatorBlock == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");
  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // When Dominatee is defined on function entry, it is not dominated by another
  // memory access.
  if (isLiveOnEntryDef(Dominatee))
    return false;

  // When Dominator is defined on function entry, it dominates the other memory
  // access.
  if (isLiveOnEntryDef(Dominator))
    return true;

  if (!BlockNumberingValid.count(DominatorBlock))
    renumberBlock(DominatorBlock);

  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start at 1.
  assert(DominatorNum != 0 && "Block was not numbered properly");
  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
  assert(DominateeNum != 0 && "Block was not numbered properly");
  return DominatorNum < DominateeNum;
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;

  if (isLiveOnEntryDef(Dominatee))
    return false;

  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the UseBB and the DefBB are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }
  // If it's not a PHI node use, the normal dominates can already handle it.
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}

const static char LiveOnEntryStr[] = "liveOnEntry";

void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());

    if (Optional<AliasResult> AR = getOptimizedAccessType())
      OS << " " << *AR;
  }
}

void MemoryPhi::print(raw_ostream &OS) const {
  bool First = true;
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    if (!First)
      OS << ',';
    else
      First = false;

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';

  if (Optional<AliasResult> AR = getOptimizedAccessType())
    OS << " " << *AR;
}
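
// Sample printed forms (illustrative output matching the writers above):
//
//   2 = MemoryPhi({entry,liveOnEntry},{loop,1})
//   3 = MemoryDef(2)->1 MustAlias     (a MemoryDef optimized to access 1)
//   MemoryUse(1) MayAlias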
2166
2167void MemoryAccess::dump() const {
Daniel Berlin78cbd282017-02-20 22:26:03 +00002168// Cannot completely remove virtual function even in release mode.
Aaron Ballman615eb472017-10-15 14:32:27 +00002169#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
George Burgess IVe1100f52016-02-02 22:46:49 +00002170 print(dbgs());
2171 dbgs() << "\n";
Matthias Braun8c209aa2017-01-28 02:02:38 +00002172#endif
George Burgess IVe1100f52016-02-02 22:46:49 +00002173}

char MemorySSAPrinterLegacyPass::ID = 0;

MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MemorySSAWrapperPass>();
}

bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  MSSA.print(dbgs());
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}
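
// Sketch of how this printer is typically driven from the command line,
// assuming the flag name matches the legacy pass registration earlier in
// this file:
//   opt -print-memoryssa -disable-output input.ll
// When the verify-memoryssa flag (the cl::opt behind VerifyMemorySSA) is
// also passed, the printed result is additionally checked for consistency.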

AnalysisKey MemorySSAAnalysis::Key;

MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
}

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);

  return PreservedAnalyses::all();
}
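
// Under the new pass manager the same printer is reached via the pipeline
// string; assuming the name registered for it in PassRegistry.def is
// "print<memoryssa>":
//   opt -passes='print<memoryssa>' -disable-output input.ll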

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();

  return PreservedAnalyses::all();
}

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}

void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}
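
// A minimal sketch of a hypothetical legacy-PM client (MyPass and its
// members are illustrative, not defined anywhere in LLVM): declare the
// dependency, then fetch the analysis in runOnFunction.
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MemorySSAWrapperPass>();
//     AU.setPreservesAll();
//   }
//
//   bool MyPass::runOnFunction(Function &F) {
//     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
//     MemorySSAWalker *Walker = MSSA.getWalker();
//     (void)Walker; // Query clobbers via Walker->getClobberingMemoryAccess.
//     return false;
//   }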

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

/// Walk the use-def chains starting at \p StartingAccess and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
template <typename AliasAnalysisType>
MemoryAccess *
MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    unsigned &UpwardWalkLimit) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if
  // we hit a fence.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = I;
  Q.IsCall = false;

  // Unlike the other overload, do not walk to the def of a def, because we
  // are handed something we already believe is the clobbering access.
  // We never set SkipSelf to true in Q in this method.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber =
      Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}
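
// Worked illustration of the location-based walk above, with accesses
// numbered as the printers would show them (the %a/%b names and the no-alias
// relationship between them are hypothetical):
//   1 = MemoryDef(liveOnEntry)   store i8 0, i8* %a
//   2 = MemoryDef(1)             store i8 0, i8* %b
//   MemoryUse(2)                 %v = load i8, i8* %a
// Querying the load with the location of %a starts at its defining access
// (2), sees that the store to %b does not clobber %a, and returns 1 as the
// clobber.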

template <typename AliasAnalysisType>
MemoryAccess *
MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
    MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with fences, since they conservatively
  // clobber all memory and have no memory locations from which to get
  // pointers to disambiguate.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location.
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // At this point, DefiningAccess may be the live on entry def.
    // If it is, we will not get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      StartingAccess->setOptimizedAccessType(None);
      return DefiningAccess;
    }

    OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
    StartingAccess->setOptimized(OptimizedAccess);
    if (MSSA->isLiveOnEntryDef(OptimizedAccess))
      StartingAccess->setOptimizedAccessType(None);
    else if (Q.AR == MustAlias)
      StartingAccess->setOptimizedAccessType(MustAlias);
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");

  MemoryAccess *Result;
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

  return Result;
}
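
// Note on the SkipSelf path above: when a MemoryDef's optimized access is a
// MemoryPhi and walk budget remains, a second findClobber() query runs with
// Q.SkipSelfAccess set, allowing the walk to look through the phi past the
// def itself. Without SkipSelf (or with the budget exhausted), the cached
// optimized access is returned as-is.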

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}

void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}

void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}

void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}
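
// These deleteMe callbacks are the deletion hooks handed to DerivedUser:
// MemoryPhi/MemoryDef/MemoryUse live outside the core IR library, so
// Value::deleteValue() cannot name their destructors directly and instead
// dispatches through this callback to run the correct derived-class delete.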