//===-- MemorySSA.cpp - Memory SSA Builder --------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------===//
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;
STATISTIC(NumClobberCacheLookups, "Number of Memory SSA version cache lookups");
STATISTIC(NumClobberCacheHits, "Number of Memory SSA version cache hits");
STATISTIC(NumClobberCacheInserts, "Number of Memory SSA version cache inserts");
INITIALIZE_PASS_WITH_OPTIONS_BEGIN(MemorySSAPrinterPass, "print-memoryssa",
                                   "Memory SSA", true, true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterPass, "print-memoryssa", "Memory SSA", true,
                    true)
INITIALIZE_PASS(MemorySSALazy, "memoryssalazy", "Memory SSA", true, true)

namespace llvm {

/// \brief An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                        formatted_raw_ostream &OS) {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  virtual void emitInstructionAnnot(const Instruction *I,
                                    formatted_raw_ostream &OS) {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};
}
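
// Example of the annotation MemorySSAAnnotatedWriter produces (an
// illustrative sketch, not output from a real run; access numbers depend on
// construction order). For IR such as
//   store i32 4, i32* %a
//   %0 = load i32, i32* %a
// the annotated print output looks roughly like
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 4, i32* %a
//   ; MemoryUse(1)
//   %0 = load i32, i32* %a
// using the MemoryAccess print() routines defined later in this file.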

namespace {
struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}
  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};
}

namespace llvm {
/// \brief Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB,
                                     MemoryAccess *IncomingVal) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessListType *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      switch (L.getValueID()) {
      case Value::MemoryUseVal:
        cast<MemoryUse>(&L)->setDefiningAccess(IncomingVal);
        break;
      case Value::MemoryDefVal:
        // We can't legally optimize defs, because we only allow single
        // memory phis/uses on operations, and if we optimize these, we can
        // end up with multiple reaching defs. Uses do not have this
        // problem, since they do not produce a value.
        cast<MemoryDef>(&L)->setDefiningAccess(IncomingVal);
        IncomingVal = &L;
        break;
      case Value::MemoryPhiVal:
        IncomingVal = &L;
        break;
      }
    }
  }

  // Pass through values to our successors.
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessListType *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    assert(std::find(succ_begin(BB), succ_end(BB), S) != succ_end(BB) &&
           "Must be at least one edge from BB to its successor!");
    Phi->addIncoming(IncomingVal, BB);
  }

  return IncomingVal;
}
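
// A rough sketch of one renaming step, with hypothetical block names and
// access IDs: if %bb contains "2 = MemoryDef(1)", the incoming value after
// renaming %bb is 2, so a successor %succ whose access list starts with
// "3 = MemoryPhi(...)" gains the incoming pair {%bb, 2} via the addIncoming
// call above.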

/// \brief This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSet<BasicBlock *, 16> &Visited) {
  SmallVector<RenamePassData, 32> WorkStack;
  IncomingVal = renameBlock(Root->getBlock(), IncomingVal);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});
  Visited.insert(Root->getBlock());

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      Visited.insert(BB);
      IncomingVal = renameBlock(BB, IncomingVal);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// \brief Compute dominator levels, used by the phi insertion algorithm.
void MemorySSA::computeDomLevels(DenseMap<DomTreeNode *, unsigned> &DomLevels) {
  for (auto DFI = df_begin(DT->getRootNode()), DFE = df_end(DT->getRootNode());
       DFI != DFE; ++DFI)
    DomLevels[*DFI] = DFI.getPathLength() - 1;
}

/// \brief This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccesses as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func)
    : AA(nullptr), DT(nullptr), F(Func), LiveOnEntryDef(nullptr),
      Walker(nullptr), NextID(0) {}

MemorySSA::~MemorySSA() {
  // Drop all our references.
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessListType *MemorySSA::getOrCreateAccessList(BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = make_unique<AccessListType>();
  return Res.first->second.get();
}

MemorySSAWalker *MemorySSA::buildMemorySSA(AliasAnalysis *AA,
                                           DominatorTree *DT) {
  if (Walker)
    return Walker;

  assert(!this->AA && !this->DT &&
         "MemorySSA without a walker already has AA or DT?");

  auto *Result = new CachingMemorySSAWalker(this, AA, DT);
  this->AA = AA;
  this->DT = DT;

  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef = make_unique<MemoryDef>(F.getContext(), nullptr, nullptr,
                                          &StartingPoint, NextID++);

  // We maintain lists of memory accesses per block, trading memory for time.
  // The alternative would be to look up the memory access for every possible
  // instruction in the stream on demand.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  SmallPtrSet<BasicBlock *, 32> DefUseBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDefUse = false;
    bool InsertIntoDef = false;
    AccessListType *Accesses = nullptr;
    for (Instruction &I : B) {
      MemoryAccess *MA = createNewAccess(&I, true);
      if (!MA)
        continue;
      if (isa<MemoryDef>(MA))
        InsertIntoDef = true;
      else if (isa<MemoryUse>(MA))
        InsertIntoDefUse = true;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MA);
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
    if (InsertIntoDefUse)
      DefUseBlocks.insert(&B);
  }

  // Compute live-in.
  // Live-in is normally defined as "all the blocks on the path from each def
  // to each of its uses".
  // MemoryDefs are implicit uses of previous state, so they are also uses.
  // This means we don't really have def-only instructions. The only
  // MemoryDefs that are not really uses are those that are of the LiveOnEntry
  // variable (because LiveOnEntry can reach anywhere, and every def is a
  // must-kill of LiveOnEntry).
  // In theory, you could precisely compute live-in by using alias analysis to
  // disambiguate defs and uses to see which really pair up with which.
  // In practice, this would be really expensive and difficult. So we simply
  // assume all defs are also uses that need to be kept live.
  // Because of this, the end result of this live-in computation will be "the
  // entire set of basic blocks that reach any use".
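  // As a sketch: for a CFG entry -> A -> B -> C with a store (a def) in A
  // and a load (a use) in C, DefUseBlocks is {A, C}; walking predecessors
  // from those blocks marks C, B, A, and entry as live-in, which is what
  // limits the IDF computation below.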

  SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(DefUseBlocks.begin(),
                                                    DefUseBlocks.end());
  // Now that we have a set of blocks where a value is live-in, recursively add
  // predecessors until we find the full region where the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here, insert it into the set. If already in
    // the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB).second)
      continue;

    // Since the value is live into BB, it is either defined in a predecessor
    // or live into it too.
    LiveInBlockWorklist.append(pred_begin(BB), pred_end(BB));
  }

  // Determine where our MemoryPhis should go.
  IDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  IDFs.setLiveInBlocks(LiveInBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks) {
    // Insert phi node.
    AccessListType *Accesses = getOrCreateAccessList(BB);
    MemoryPhi *Phi = new MemoryPhi(F.getContext(), BB, NextID++);
    ValueToMemoryAccess.insert(std::make_pair(BB, Phi));
    // Phis are always placed at the front of the block.
    Accesses->push_front(Phi);
  }

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  // Now optimize each MemoryUse's defining access to point to the nearest
  // dominating clobbering def.
  // This ensures that MemoryUses that are killed by the same store are
  // immediate users of that store, one of the invariants we guarantee.
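  // An illustrative sketch with hypothetical IDs: if renaming produced
  //   1 = MemoryDef(liveOnEntry)   ; store to %a
  //   2 = MemoryDef(1)             ; store to unrelated %b
  //   MemoryUse(2)                 ; load of %a
  // the walk below re-points the load's defining access from 2 to 1, its
  // actual clobber.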
  for (auto DomNode : depth_first(DT)) {
    BasicBlock *BB = DomNode->getBlock();
    auto AI = PerBlockAccesses.find(BB);
    if (AI == PerBlockAccesses.end())
      continue;
    AccessListType *Accesses = AI->second.get();
    for (auto &MA : *Accesses) {
      if (auto *MU = dyn_cast<MemoryUse>(&MA)) {
        Instruction *Inst = MU->getMemoryInst();
        MU->setDefiningAccess(Result->getClobberingMemoryAccess(Inst));
      }
    }
  }

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);

  Walker = Result;
  return Walker;
}

/// \brief Helper function to create new memory accesses.
MemoryAccess *MemorySSA::createNewAccess(Instruction *I, bool IgnoreNonMemory) {
  // Find out what effect this instruction has on memory.
  ModRefInfo ModRef = AA->getModRefInfo(I);
  bool Def = bool(ModRef & MRI_Mod);
  bool Use = bool(ModRef & MRI_Ref);

  // It's possible for an instruction to not touch memory at all. During
  // construction, we ignore such instructions.
  if (IgnoreNonMemory && !Def && !Use)
    return nullptr;

  assert((Def || Use) &&
         "Trying to create a memory access with a non-memory instruction");

  MemoryUseOrDef *MA;
  if (Def)
    MA = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MA = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess.insert(std::make_pair(I, MA));
  return MA;
}

MemoryAccess *MemorySSA::findDominatingDef(BasicBlock *UseBlock,
                                           enum InsertionPlace Where) {
  // Handle the initial case.
  if (Where == Beginning)
    // The only thing that could define us at the beginning is a phi node.
    if (MemoryPhi *Phi = getMemoryAccess(UseBlock))
      return Phi;

  DomTreeNode *CurrNode = DT->getNode(UseBlock);
  // Need to be defined by our dominator.
  if (Where == Beginning)
    CurrNode = CurrNode->getIDom();
  Where = End;
  while (CurrNode) {
    auto It = PerBlockAccesses.find(CurrNode->getBlock());
    if (It != PerBlockAccesses.end()) {
      auto &Accesses = It->second;
      for (auto RAI = Accesses->rbegin(), RAE = Accesses->rend(); RAI != RAE;
           ++RAI) {
        if (isa<MemoryDef>(*RAI) || isa<MemoryPhi>(*RAI))
          return &*RAI;
      }
    }
    CurrNode = CurrNode->getIDom();
  }
  return LiveOnEntryDef.get();
}

/// \brief Returns true if \p Replacer dominates \p Replacee.
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
                             const MemoryAccess *Replacee) const {
  if (isa<MemoryUseOrDef>(Replacee))
    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
  const auto *MP = cast<MemoryPhi>(Replacee);
  // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since Replacee may occur multiple times in the phi node, we have to check
  // each operand to ensure Replacer dominates each operand where Replacee
  // occurs.
  for (const Use &Arg : MP->operands()) {
    if (Arg.get() != Replacee &&
        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
      return false;
  }
  return true;
}

/// \brief If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}
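
// For example (a sketch with made-up IDs): onlySingleValue on
// "3 = MemoryPhi({bb1,1},{bb2,1})" returns access 1, while on
// "3 = MemoryPhi({bb1,1},{bb2,2})" it returns nullptr.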

/// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
///
/// Because of the way the intrusive list and use lists work, it is important
/// to do removal in the right order.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary.
  if (!isa<MemoryUse>(MA))
    Walker->invalidateInfo(MA);
  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here.
  Value *MemoryInst;
  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    MemoryInst = MUD->getMemoryInst();
  } else {
    MemoryInst = MA->getBlock();
  }
  ValueToMemoryAccess.erase(MemoryInst);

  auto &Accesses = PerBlockAccesses.find(MA->getBlock())->second;
  Accesses->erase(MA);
  if (Accesses->empty()) {
    PerBlockAccesses.erase(MA->getBlock());
  }
}

void MemorySSA::removeMemoryAccess(MemoryAccess *MA) {
  assert(!isLiveOnEntryDef(MA) && "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  // Re-point the uses at our defining access.
  if (!MA->use_empty())
    MA->replaceAllUsesWith(NewDefTarget);

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here.
  removeFromLookups(MA);
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

void MemorySSA::dump() const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(dbgs(), &Writer);
}

void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
}

/// \brief Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *MP = getMemoryAccess(&B)) {
      for (User *U : MP->users()) {
        BasicBlock *UseBlock;
        // Phi operands are used on edges; we simulate the right domination by
        // acting as if the use occurred at the end of the predecessor block.
        if (MemoryPhi *P = dyn_cast<MemoryPhi>(U)) {
          for (const auto &Arg : P->operands()) {
            if (Arg == MP) {
              UseBlock = P->getIncomingBlock(Arg);
              break;
            }
          }
        } else {
          UseBlock = cast<MemoryAccess>(U)->getBlock();
        }
        (void)UseBlock;
        assert(DT->dominates(MP->getBlock(), UseBlock) &&
               "Memory PHI does not dominate its uses");
      }
    }

    for (Instruction &I : B) {
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (User *U : MD->users()) {
        BasicBlock *UseBlock;
        // Things are allowed to flow to phi nodes over their predecessor edge.
        if (auto *P = dyn_cast<MemoryPhi>(U)) {
          for (const auto &Arg : P->operands()) {
            if (Arg == MD) {
              UseBlock = P->getIncomingBlock(Arg);
              break;
            }
          }
        } else {
          UseBlock = cast<MemoryAccess>(U)->getBlock();
        }
        assert(DT->dominates(MD->getBlock(), UseBlock) &&
               "Memory Def does not dominate its uses");
      }
    }
  }
}

/// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
///
/// llvm_unreachable is used instead of asserts because this may be called in
/// a build without asserts. In that case, we don't want this to turn into a
/// nop.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
  // The live on entry use may cause us to get a NULL def here.
  if (!Def) {
    if (!isLiveOnEntryDef(Use))
      llvm_unreachable("Null def but use does not point to live on entry def");
  } else if (std::find(Def->user_begin(), Def->user_end(), Use) ==
             Def->user_end()) {
    llvm_unreachable("Did not find use in def's use list");
  }
}

/// \brief Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list.
void MemorySSA::verifyDefUses(Function &F) const {
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *Phi = getMemoryAccess(&B))
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);

    for (Instruction &I : B) {
      if (MemoryAccess *MA = getMemoryAccess(&I)) {
        assert(isa<MemoryUseOrDef>(MA) &&
               "Found a phi node not attached to a bb");
        verifyUseInDefs(cast<MemoryUseOrDef>(MA)->getDefiningAccess(), MA);
      }
    }
  }
}

MemoryAccess *MemorySSA::getMemoryAccess(const Value *I) const {
  return ValueToMemoryAccess.lookup(I);
}

MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
  return cast_or_null<MemoryPhi>(getMemoryAccess((const Value *)BB));
}

/// \brief Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {

  assert((Dominator->getBlock() == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");
  // Get the access list for the block.
  const AccessListType *AccessList = getBlockAccesses(Dominator->getBlock());
  AccessListType::const_reverse_iterator It(Dominator->getIterator());

  // If we hit the beginning of the access list before we hit dominatee, we
  // must dominate it.
  return std::none_of(It, AccessList->rend(),
                      [&](const MemoryAccess &MA) { return &MA == Dominatee; });
}
629const static char LiveOnEntryStr[] = "liveOnEntry";
630
631void MemoryDef::print(raw_ostream &OS) const {
632 MemoryAccess *UO = getDefiningAccess();
633
634 OS << getID() << " = MemoryDef(";
635 if (UO && UO->getID())
636 OS << UO->getID();
637 else
638 OS << LiveOnEntryStr;
639 OS << ')';
640}
641
642void MemoryPhi::print(raw_ostream &OS) const {
643 bool First = true;
644 OS << getID() << " = MemoryPhi(";
645 for (const auto &Op : operands()) {
646 BasicBlock *BB = getIncomingBlock(Op);
647 MemoryAccess *MA = cast<MemoryAccess>(Op);
648 if (!First)
649 OS << ',';
650 else
651 First = false;
652
653 OS << '{';
654 if (BB->hasName())
655 OS << BB->getName();
656 else
657 BB->printAsOperand(OS, false);
658 OS << ',';
659 if (unsigned ID = MA->getID())
660 OS << ID;
661 else
662 OS << LiveOnEntryStr;
663 OS << '}';
664 }
665 OS << ')';
666}
667
668MemoryAccess::~MemoryAccess() {}
669
670void MemoryUse::print(raw_ostream &OS) const {
671 MemoryAccess *UO = getDefiningAccess();
672 OS << "MemoryUse(";
673 if (UO && UO->getID())
674 OS << UO->getID();
675 else
676 OS << LiveOnEntryStr;
677 OS << ')';
678}
679
680void MemoryAccess::dump() const {
681 print(dbgs());
682 dbgs() << "\n";
683}

char MemorySSAPrinterPass::ID = 0;

MemorySSAPrinterPass::MemorySSAPrinterPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAPrinterPass::releaseMemory() {
  // Subtlety: Be sure to delete the walker before MSSA, because the walker's
  // dtor may try to access MemorySSA.
  Walker.reset();
  MSSA.reset();
}

void MemorySSAPrinterPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
}

bool MemorySSAPrinterPass::doInitialization(Module &M) {
  VerifyMemorySSA = M.getContext()
                        .getOption<bool, MemorySSAPrinterPass,
                                   &MemorySSAPrinterPass::VerifyMemorySSA>();
  return false;
}

void MemorySSAPrinterPass::registerOptions() {
  OptionRegistry::registerOption<bool, MemorySSAPrinterPass,
                                 &MemorySSAPrinterPass::VerifyMemorySSA>(
      "verify-memoryssa", "Run the Memory SSA verifier", false);
}

void MemorySSAPrinterPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}

bool MemorySSAPrinterPass::runOnFunction(Function &F) {
  this->F = &F;
  MSSA.reset(new MemorySSA(F));
  AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  Walker.reset(MSSA->buildMemorySSA(AA, DT));

  if (VerifyMemorySSA) {
    MSSA->verifyMemorySSA();
  }

  return false;
}

char MemorySSALazy::ID = 0;

MemorySSALazy::MemorySSALazy() : FunctionPass(ID) {
  initializeMemorySSALazyPass(*PassRegistry::getPassRegistry());
}

void MemorySSALazy::releaseMemory() { MSSA.reset(); }

bool MemorySSALazy::runOnFunction(Function &F) {
  MSSA.reset(new MemorySSA(F));
  return false;
}

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

CachingMemorySSAWalker::CachingMemorySSAWalker(MemorySSA *M, AliasAnalysis *A,
                                               DominatorTree *D)
    : MemorySSAWalker(M), AA(A), DT(D) {}

CachingMemorySSAWalker::~CachingMemorySSAWalker() {}

struct CachingMemorySSAWalker::UpwardsMemoryQuery {
  // True if we saw a phi whose predecessor was a backedge.
  bool SawBackedgePhi;
  // True if our original query started off as a call.
  bool IsCall;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst;
  // Set of visited Instructions for this query.
  DenseSet<MemoryAccessPair> Visited;
  // Set of visited call accesses for this query. This is separated out
  // because you can always cache and look up the result of call queries
  // (i.e., when IsCall == true) for every call in the chain. The calls have
  // no AA location associated with them, and thus, no context dependence.
  SmallPtrSet<const MemoryAccess *, 32> VisitedCalls;
  // The MemoryAccess we actually got called with, used to test local
  // domination.
  const MemoryAccess *OriginalAccess;
  // The DataLayout for the module we started in.
  const DataLayout *DL;

  UpwardsMemoryQuery()
      : SawBackedgePhi(false), IsCall(false), Inst(nullptr),
        OriginalAccess(nullptr), DL(nullptr) {}
};

void CachingMemorySSAWalker::invalidateInfo(MemoryAccess *MA) {

  // TODO: We can do much better cache invalidation with differently stored
  // caches. For now, for MemoryUses, we simply remove them
  // from the cache, and kill the entire call/non-call cache for everything
  // else. The problem is for phis or defs, currently we'd need to follow use
  // chains down and invalidate anything below us in the chain that currently
  // terminates at this access.

  // See if this is a MemoryUse; if so, just remove the cached info. A
  // MemoryUse is by definition never a barrier, so nothing in the cache could
  // point to this use. In that case, we only need to invalidate the info for
  // the use itself.

  if (MemoryUse *MU = dyn_cast<MemoryUse>(MA)) {
    UpwardsMemoryQuery Q;
    Instruction *I = MU->getMemoryInst();
    Q.IsCall = bool(ImmutableCallSite(I));
    Q.Inst = I;
    if (!Q.IsCall)
      Q.StartingLoc = MemoryLocation::get(I);
    doCacheRemove(MA, Q, Q.StartingLoc);
    return;
  }
  // If it is not a use, the best we can do right now is destroy the cache.
  bool IsCall = false;

  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    Instruction *I = MUD->getMemoryInst();
    IsCall = bool(ImmutableCallSite(I));
  }
  if (IsCall)
    CachedUpwardsClobberingCall.clear();
  else
    CachedUpwardsClobberingAccess.clear();
}

void CachingMemorySSAWalker::doCacheRemove(const MemoryAccess *M,
                                           const UpwardsMemoryQuery &Q,
                                           const MemoryLocation &Loc) {
  if (Q.IsCall)
    CachedUpwardsClobberingCall.erase(M);
  else
    CachedUpwardsClobberingAccess.erase({M, Loc});
}

void CachingMemorySSAWalker::doCacheInsert(const MemoryAccess *M,
                                           MemoryAccess *Result,
                                           const UpwardsMemoryQuery &Q,
                                           const MemoryLocation &Loc) {
  ++NumClobberCacheInserts;
  if (Q.IsCall)
    CachedUpwardsClobberingCall[M] = Result;
  else
    CachedUpwardsClobberingAccess[{M, Loc}] = Result;
}
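
// Cache keying sketch: a call query is keyed on the access alone (calls
// carry no MemoryLocation), while a query for, say, a load of %p is keyed
// on the pair {access, location of %p}, so one access can cache different
// clobbers for different locations.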

MemoryAccess *CachingMemorySSAWalker::doCacheLookup(const MemoryAccess *M,
                                                    const UpwardsMemoryQuery &Q,
                                                    const MemoryLocation &Loc) {
  ++NumClobberCacheLookups;
  MemoryAccess *Result = nullptr;

  if (Q.IsCall)
    Result = CachedUpwardsClobberingCall.lookup(M);
  else
    Result = CachedUpwardsClobberingAccess.lookup({M, Loc});

  if (Result)
    ++NumClobberCacheHits;
  return Result;
}

bool CachingMemorySSAWalker::instructionClobbersQuery(
    const MemoryDef *MD, UpwardsMemoryQuery &Q,
    const MemoryLocation &Loc) const {
  Instruction *DefMemoryInst = MD->getMemoryInst();
  assert(DefMemoryInst && "Defining instruction not actually an instruction");

  if (!Q.IsCall)
    return AA->getModRefInfo(DefMemoryInst, Loc) & MRI_Mod;

  // If this is a call, mark it for caching.
  if (ImmutableCallSite(DefMemoryInst))
    Q.VisitedCalls.insert(MD);
  ModRefInfo I = AA->getModRefInfo(DefMemoryInst, ImmutableCallSite(Q.Inst));
  return I != MRI_NoModRef;
}
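
// Sketch of the two query kinds above: for a non-call query on a location
// (say, a load of %a), a MemoryDef clobbers it only if AA reports a Mod
// effect on that location; for a call query, any ModRef result against the
// call is treated as a clobber.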

MemoryAccessPair CachingMemorySSAWalker::UpwardsDFSWalk(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    UpwardsMemoryQuery &Q, bool FollowingBackedge) {
  MemoryAccess *ModifyingAccess = nullptr;

  auto DFI = df_begin(StartingAccess);
  for (auto DFE = df_end(StartingAccess); DFI != DFE;) {
    MemoryAccess *CurrAccess = *DFI;
    if (MSSA->isLiveOnEntryDef(CurrAccess))
      return {CurrAccess, Loc};
    if (auto CacheResult = doCacheLookup(CurrAccess, Q, Loc))
      return {CacheResult, Loc};
    // If this is a MemoryDef, check whether it clobbers our current query.
    if (auto *MD = dyn_cast<MemoryDef>(CurrAccess)) {
      // If we hit the top, stop following this path.
      // While we can do lookups, we can't sanely do inserts here unless we
      // were to track everything we saw along the way, since we don't know
      // where we will stop.
      if (instructionClobbersQuery(MD, Q, Loc)) {
        ModifyingAccess = CurrAccess;
        break;
      }
    }

    // We need to know whether it is a phi so we can track backedges.
    // Otherwise, walk all upward defs.
    if (!isa<MemoryPhi>(CurrAccess)) {
      ++DFI;
      continue;
    }

    // Recurse on PHI nodes, since we need to change locations.
    // TODO: Allow graphtraits on pairs, which would turn this whole function
    // into a normal single depth first walk.
    MemoryAccess *FirstDef = nullptr;
    DFI = DFI.skipChildren();
    const MemoryAccessPair PHIPair(CurrAccess, Loc);
    bool VisitedOnlyOne = true;
    for (auto MPI = upward_defs_begin(PHIPair), MPE = upward_defs_end();
         MPI != MPE; ++MPI) {
      // Don't follow this path again if we've followed it once.
      if (!Q.Visited.insert(*MPI).second)
        continue;

      bool Backedge =
          !FollowingBackedge &&
          DT->dominates(CurrAccess->getBlock(), MPI.getPhiArgBlock());

      MemoryAccessPair CurrentPair =
          UpwardsDFSWalk(MPI->first, MPI->second, Q, Backedge);
      // All the phi arguments should reach the same point if we can bypass
      // this phi. The alternative is that they hit this phi node, which
      // means we can skip this argument.
      if (FirstDef && CurrentPair.first != PHIPair.first &&
          CurrentPair.first != FirstDef) {
        ModifyingAccess = CurrAccess;
        break;
      }

      if (!FirstDef)
        FirstDef = CurrentPair.first;
      else
        VisitedOnlyOne = false;
    }

    // The above loop determines if all arguments of the phi node reach the
    // same place. However, we skip arguments that are cyclically dependent
    // only on the value of this phi node. This means in some cases, we may
    // only visit one argument of the phi node, and the above loop will
    // happily say that all the arguments are the same. However, in that case,
    // we still can't walk past the phi node, because that argument still
    // kills the access unless we hit the top of the function when walking
    // that argument.
    if (VisitedOnlyOne && FirstDef && !MSSA->isLiveOnEntryDef(FirstDef))
      ModifyingAccess = CurrAccess;
  }

  if (!ModifyingAccess)
    return {MSSA->getLiveOnEntryDef(), Q.StartingLoc};

  const BasicBlock *OriginalBlock = Q.OriginalAccess->getBlock();
  unsigned N = DFI.getPathLength();
  MemoryAccess *FinalAccess = ModifyingAccess;
  for (; N != 0; --N) {
    ModifyingAccess = DFI.getPath(N - 1);
    BasicBlock *CurrBlock = ModifyingAccess->getBlock();
    if (!FollowingBackedge)
      doCacheInsert(ModifyingAccess, FinalAccess, Q, Loc);
    if (DT->dominates(CurrBlock, OriginalBlock) &&
        (CurrBlock != OriginalBlock || !FollowingBackedge ||
         MSSA->locallyDominates(ModifyingAccess, Q.OriginalAccess)))
      break;
  }

  // Cache everything else on the way back. The caller should cache
  // Q.OriginalAccess for us.
  for (; N != 0; --N) {
    MemoryAccess *CacheAccess = DFI.getPath(N - 1);
    doCacheInsert(CacheAccess, ModifyingAccess, Q, Loc);
  }
  assert(Q.Visited.size() < 1000 && "Visited too much");

  return {ModifyingAccess, Loc};
}

/// \brief Walk the use-def chains starting at \p MA and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
MemoryAccess *
CachingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *StartingAccess,
                                                  UpwardsMemoryQuery &Q) {
  return UpwardsDFSWalk(StartingAccess, Q.StartingLoc, Q, false).first;
}

MemoryAccess *
CachingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *StartingAccess,
                                                  MemoryLocation &Loc) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if
  // we hit a fence.
  if (isa<FenceInst>(I))
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = StartingUseOrDef->getMemoryInst();
  Q.IsCall = false;
  Q.DL = &Q.Inst->getModule()->getDataLayout();

  if (auto CacheResult = doCacheLookup(StartingUseOrDef, Q, Q.StartingLoc))
    return CacheResult;

  // Unlike the other function, do not walk to the def of a def, because we
  // are handed something we already believe is the clobbering access.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
  doCacheInsert(Q.OriginalAccess, Clobber, Q, Q.StartingLoc);
  DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *StartingUseOrDef << "\n");
  DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}

MemoryAccess *
CachingMemorySSAWalker::getClobberingMemoryAccess(const Instruction *I) {
  // There should be no way to look up an instruction and get a phi as the
  // access, since we only map BBs to PHIs. So, this must be a use or def.
  auto *StartingAccess = cast<MemoryUseOrDef>(MSSA->getMemoryAccess(I));

  // We can't sanely do anything with a FenceInst; they conservatively clobber
  // all memory and have no locations to get pointers from to try to
  // disambiguate.
  if (isa<FenceInst>(I))
    return StartingAccess;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingAccess;
  Q.IsCall = bool(ImmutableCallSite(I));
  if (!Q.IsCall)
    Q.StartingLoc = MemoryLocation::get(I);
  Q.Inst = I;
  Q.DL = &Q.Inst->getModule()->getDataLayout();
  if (auto CacheResult = doCacheLookup(StartingAccess, Q, Q.StartingLoc))
    return CacheResult;

  // Start with the thing we already think clobbers this location.
  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

  // At this point, DefiningAccess may be the live on entry def.
  // If it is, we will not get a better result.
  if (MSSA->isLiveOnEntryDef(DefiningAccess))
    return DefiningAccess;

  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
  doCacheInsert(Q.OriginalAccess, Result, Q, Q.StartingLoc);
  // TODO: When this implementation is more mature, we may want to figure out
  // what this additional caching buys us. It's most likely A Good Thing.
  if (Q.IsCall)
    for (const MemoryAccess *MA : Q.VisitedCalls)
      doCacheInsert(MA, Result, Q, Q.StartingLoc);

  DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *DefiningAccess << "\n");
  DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *Result << "\n");

  return Result;
}

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(const Instruction *I) {
  MemoryAccess *MA = MSSA->getMemoryAccess(I);
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}
}