//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join point.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (IE we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
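//
// As an illustrative sketch (a hypothetical CFG, not from the paper):
//
//     entry: 1 = MemoryDef(liveOnEntry)
//      /  \
//   then  else      ; neither branch contains a may-def
//      \  /
//     merge
//
// Asking for the previous definition at 'merge' recurses into both
// predecessors and obtains 1 from each, so tryRemoveTrivialPhi folds the
// candidate phi away and 'merge' uses 1 directly. A MemoryPhi survives only
// if the predecessors yield different definitions, or if the walk re-enters
// a block it is still visiting (a cycle, broken by inserting a phi).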
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end()) {
    return Cached->second;
  }

  // If this method is called from an unreachable block, return LoE.
  if (!MSSA->DT->isReachableFromEntry(BB))
    return MSSA->getLiveOnEntryDef();

  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle; we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle.
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    bool UniqueIncomingAccess = true;
    MemoryAccess *SingleAccess = nullptr;
    for (auto *Pred : predecessors(BB)) {
      if (MSSA->DT->isReachableFromEntry(Pred)) {
        auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
        if (!SingleAccess)
          SingleAccess = IncomingAccess;
        else if (IncomingAccess != SingleAccess)
          UniqueIncomingAccess = false;
        PhiOps.push_back(IncomingAccess);
      } else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());
    }

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet; that's okay.
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi.
    if (Result == Phi && UniqueIncomingAccess && SingleAccess)
      Result = SingleAccess;
    else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a single
// definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}
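
// Illustrative sketch (hypothetical access list for one block):
//
//   1 = MemoryDef(liveOnEntry)
//   MemoryUse(1)
//   2 = MemoryDef(1)
//
// getPreviousDefInBlock walks backwards: for def 2 it uses the def iterator
// and returns 1; for the MemoryUse it walks the all-access list and also
// returns 1; for def 1 there is nothing earlier in the block, so it returns
// nullptr and getPreviousDef falls back to the recursive global search.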

// This starts at the end of the block and returns the last definition in it,
// or recurses to find the previous definition if the block has none.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}
183// Recurse over a set of phi uses to eliminate the trivial ones
184MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
185 if (!Phi)
186 return nullptr;
187 TrackingVH<MemoryAccess> Res(Phi);
188 SmallVector<TrackingVH<Value>, 8> Uses;
189 std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
Alina Sbirlea28637212019-08-20 22:47:58 +0000190 for (auto &U : Uses)
191 if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
192 tryRemoveTrivialPhi(UsePhi);
Daniel Berlinae6b8b62017-01-28 01:35:02 +0000193 return Res;
194}
195
196// Eliminate trivial phis
197// Phis are trivial if they are defined either by themselves, or all the same
198// argument.
199// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
200// We recursively try to remove them.
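// For example, in b = phi(a, b) every operand is either the phi itself or
// 'a', so all uses of b can be rewritten to 'a' and the phi removed; that
// rewrite may in turn make a user phi such as c = phi(b, b, a) trivial,
// which is why removal recurses through the phi's users (recursePhi above).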
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments.
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far.
    if (Op == Phi || Op == Same)
      continue;
    // Not the same, return the phi since it's not eliminatable by us.
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef.
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));
  // In cases without unreachable blocks, because uses do not create new
  // may-defs, there are only two cases:
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming work
  // to do.

  // In cases with unreachable blocks, where the unnecessary Phis were
  // optimized out, adding the Use may re-insert those Phis. Hence, when
  // inserting Uses outside of the MSSA creation process, and new Phis were
  // added, rename all uses if we are asked.

  if (!RenameUses && !InsertedPHIs.empty()) {
    auto *Defs = MSSA->getBlockDefs(MU->getBlock());
    (void)Defs;
    assert((!Defs || (++Defs->begin() == Defs->end())) &&
           "Block may have only a Phi or no defs");
  }

  if (RenameUses && InsertedPHIs.size()) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MU->getBlock();

    if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
      MemoryAccess *FirstDef = &*Defs->begin();
      // Convert to incoming value if it's a memorydef. A phi *is* already an
      // incoming value.
      if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
        FirstDef = MD->getDefiningAccess();

      MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
    }
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs)
      if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  }
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any phi operand whose incoming block is BB with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}
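
// Illustrative sketch: a switch in block BB with two cases branching to the
// same successor gives that successor's MemoryPhi two incoming entries for
// BB, e.g. phi([4, BB], [4, BB], [5, Other]). The loop above starts at the
// first index getBasicBlockIndex returns and rewrites each consecutive
// entry for BB to NewDef, stopping at the first entry for a different block.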

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
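//
// Illustrative sketch (hypothetical, same-block case): inserting def 2
// between an existing def and a def that used it,
//
//   1 = MemoryDef(liveOnEntry)
//   2 = MemoryDef(?)        <- newly inserted MD
//   3 = MemoryDef(1)        <- stale
//
// getPreviousDef(MD) returns 1, the store/phi uses of 1 below MD (here, 3)
// are redirected to 2, and MD's defining access is set to 1, giving
// 3 = MemoryDef(2) and 2 = MemoryDef(1). MemoryUses are left to the
// renaming step.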
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = false;
  if (DefBefore->getBlock() == MD->getBlock() &&
      !(isa<MemoryPhi>(DefBefore) &&
        std::find(InsertedPHIs.begin(), InsertedPHIs.end(), DefBefore) !=
            InsertedPHIs.end()))
    DefBeforeSameBlock = true;

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis.
  if (DefBeforeSameBlock) {
    DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      User *Usr = U.getUser();
      return !isa<MemoryUse>(Usr) && Usr != MD;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
    });
  }

  // and that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());

  // Remember the index where we may insert new phis.
  unsigned NewPhiIndex = InsertedPHIs.size();
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis we would create, it
    // would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases we can skip this work, such as when we have a
    // single successor, and only used a straight line of single pred blocks
    // backwards to find the def. To make that work, we'd have to track whether
    // getDefRecursive only ever used the single predecessor case. These types
    // of paths also only exist in between CFG simplifications.

    // If this is the first def in the block and this insert is in an arbitrary
    // place, compute IDF and place phis.
    SmallPtrSet<BasicBlock *, 2> DefiningBlocks;

    // If this is the last Def in the block, also compute IDF based on MD, since
    // this may be a newly added Def, and we may need additional Phis.
    auto Iter = MD->getDefsIterator();
    ++Iter;
    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
    if (Iter == IterEnd)
      DefiningBlocks.insert(MD->getBlock());

    for (const auto &VH : InsertedPHIs)
      if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
        DefiningBlocks.insert(RealPHI->getBlock());
    ForwardIDFCalculator IDFs(*MSSA->DT);
    SmallVector<BasicBlock *, 32> IDFBlocks;
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
    for (auto *BBIDF : IDFBlocks) {
      auto *MPhi = MSSA->getMemoryAccess(BBIDF);
      if (!MPhi) {
        MPhi = MSSA->createMemoryPhi(BBIDF);
        NewInsertedPHIs.push_back(MPhi);
      }
      // Add the phis created into the IDF blocks to NonOptPhis, so they are not
      // optimized out as trivial by the call to getPreviousDefFromEnd below.
      // Once they are complete, all these Phis are added to the FixupList, and
      // removed from NonOptPhis inside fixupDefs(). Existing Phis in IDF may
      // need fixing as well, and potentially be trivial before this insertion,
      // hence add all IDF Phis. See PR43044.
      NonOptPhis.insert(MPhi);
    }
    for (auto &MPhi : NewInsertedPHIs) {
      auto *BBIDF = MPhi->getBlock();
      for (auto *Pred : predecessors(BBIDF)) {
        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
        MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
      }
    }

    // Re-take the index where we're adding the new phis, because the above call
    // to getPreviousDefFromEnd may have inserted into InsertedPHIs.
    NewPhiIndex = InsertedPHIs.size();
    for (auto &MPhi : NewInsertedPHIs) {
      InsertedPHIs.push_back(&*MPhi);
      FixupList.push_back(&*MPhi);
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more, that are already minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them.
    FixupList.append(InsertedPHIs.begin() + StartingPHISize, InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will become
    // the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed, unmark it so it can be optimized.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place it on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes.
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually dominate
        // it.
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist.
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we already
          // processed. FIXME: Double check this
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    tryRemoveTrivialPhi(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    tryRemoveTrivialPhi(MPhi);
  }
}
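
// Illustrative sketch: if a switch in 'From' had two cases targeting 'To'
// and one of them is removed, the MemoryPhi in 'To' carries two incoming
// entries for 'From'; the predicate above keeps the first entry it sees for
// 'From', deletes the rest, and then folds the phi if that left it trivial.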

static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
                                                  const ValueToValueMapTy &VMap,
                                                  PhiToDefMap &MPhiMap,
                                                  bool CloneWasSimplified,
                                                  MemorySSA *MSSA) {
  MemoryAccess *InsnDefining = MA;
  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
    if (!MSSA->isLiveOnEntryDef(DefMUD)) {
      Instruction *DefMUDI = DefMUD->getMemoryInst();
      assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
      if (Instruction *NewDefMUDI =
              cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
        InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
        if (!CloneWasSimplified)
          assert(InsnDefining && "Defining instruction cannot be nullptr.");
        else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
          // The clone was simplified, it's no longer a MemoryDef, look up.
          auto DefIt = DefMUD->getDefsIterator();
          // Since simplified clones only occur in single block cloning, a
          // previous definition must exist, otherwise NewDefMUDI would not
          // have been found in VMap.
          assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
                 "Previous def must exist");
          InsnDefining = getNewDefiningAccessForClone(
              &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
        }
      }
    }
  } else {
    MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
    if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
      InsnDefining = NewDefPhi;
  }
  assert(InsnDefining && "Defining instruction cannot be nullptr.");
  return InsnDefining;
}
Alina Sbirlea79800992018-09-10 20:13:01 +0000575void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
576 const ValueToValueMapTy &VMap,
Alina Sbirlea7a0098a2019-06-17 18:58:40 +0000577 PhiToDefMap &MPhiMap,
578 bool CloneWasSimplified) {
Alina Sbirlea79800992018-09-10 20:13:01 +0000579 const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
580 if (!Acc)
581 return;
582 for (const MemoryAccess &MA : *Acc) {
583 if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
584 Instruction *Insn = MUD->getMemoryInst();
585 // Entry does not exist if the clone of the block did not clone all
586 // instructions. This occurs in LoopRotate when cloning instructions
587 // from the old header to the old preheader. The cloned instruction may
588 // also be a simplified Value, not an Instruction (see LoopRotate).
Alina Sbirlea7a0098a2019-06-17 18:58:40 +0000589 // Also in LoopRotate, even when it's an instruction, due to it being
590 // simplified, it may be a Use rather than a Def, so we cannot use MUD as
591 // template. Calls coming from updateForClonedBlockIntoPred, ensure this.
Alina Sbirlea79800992018-09-10 20:13:01 +0000592 if (Instruction *NewInsn =
593 dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
594 MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
Alina Sbirlea4bc625c2019-07-30 20:10:33 +0000595 NewInsn,
596 getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
597 MPhiMap, CloneWasSimplified, MSSA),
598 /*Template=*/CloneWasSimplified ? nullptr : MUD,
599 /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
600 if (NewUseOrDef)
601 MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
Alina Sbirlea79800992018-09-10 20:13:01 +0000602 }
603 }
604 }
605}

void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi.
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
  // replaced with the unique value.
  tryRemoveTrivialPhi(NewMPhi);
}
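
// Illustrative sketch: for a loop header reached from Preheader and two
// latches L1/L2, the header phi changes from
//   header: phi([PreDef, Preheader], [D1, L1], [D2, L2])
// to
//   BEBlock: phiBE = phi([D1, L1], [D2, L2])
//   header:  phi([PreDef, Preheader], [phiBE, BEBlock])
// and if D1 == D2 the new phiBE is trivial and is folded away again.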

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB and therefore also dominate
  // P1. Defs from BB being used in BB will be replaced with the cloned defs
  // from VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
  // Instructions cloned into the predecessor are in practice sometimes
  // simplified, so disable the use of the template, and create an access from
  // scratch.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
  }

  if (!RevDeleteUpdates.empty()) {
    // Update for inserted edges: use NewDT and snapshot CFG as if deletes had
    // not occurred.
    // FIXME: This creates a new DT, so it's more expensive to do mixed
    // deletes/inserts vs just inserts. We can do an incremental update on the
    // DT to revert deletes, then re-delete the edges. Teaching DT to do this
    // is part of a pending cleanup.
    DominatorTree NewDT(DT, RevDeleteUpdates);
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, NewDT, &GD);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges.
  for (auto &Update : RevDeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
        Pred = Pair.second;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT is invalidated. Return LoE as its last def. This will be added to
        // MemoryPhi node, and later deleted when the block is deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        // BB can be unreachable though, return LoE if that is the case.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        BB = Pred;
      }
    };
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
  // include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered sets,
  // we lose the information of multiple edges possibly existing between two
  // blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep an unordered set for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
      BasicBlock *Pi = Pair.second;
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not invalidate
      // iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  SmallVector<WeakVH, 8> InsertedPhis;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated entries
    // if an edge is added from the same predecessor to two different blocks,
    // and this does happen in rotate. Note that the map needs to be updated
    // when deleting non-necessary phis below, if the phi is in the map, by
    // replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all uses
        // of NewPhi with the definition coming from all predecessors (DefP1),
        // before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for all
      // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
      // sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  tryRemoveTrivialPhis(InsertedPhis);
  // Create the set of blocks that now have a definition. We'll use this to
  // compute IDF and add Phis there next.
  SmallVector<BasicBlock *, 8> BlocksToProcess;
  for (auto &VH : InsertedPhis)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      BlocksToProcess.push_back(MPhi->getBlock());

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT, GD);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);

    SmallSetVector<MemoryPhi *, 4> PhisToFill;
    // First create all needed Phis.
    for (auto *BBIDF : IDFBlocks)
      if (!MSSA->getMemoryAccess(BBIDF)) {
        auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
        InsertedPhis.push_back(IDFPhi);
        PhisToFill.insert(IDFPhi);
      }
    // Then update or insert their correct incoming values.
    for (auto *BBIDF : IDFBlocks) {
      auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
      assert(IDFPhi && "Phi must exist");
      if (!PhisToFill.count(IDFPhi)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        for (auto &Pair : children<GraphDiffInvBBPair>({GD, BBIDF})) {
          BasicBlock *Pi = Pair.second;
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
        }
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
  tryRemoveTrivialPhis(InsertedPhis);
}

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups are needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD, /*RenameUses=*/true);
  else
    insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  return moveTo(What, BB, Where);
}

// The instructions from Start to the end of To used to be in From. Move their
// memory accesses to the end of To and update the access lists of both blocks.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {
  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  assert(Start->getParent() == To && "Incorrect Start instruction");
  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (FirstInNew) {
    auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
    do {
      auto NextIt = ++MUD->getIterator();
      MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                    ? nullptr
                                    : cast<MemoryUseOrDef>(&*NextIt);
      MSSA->moveTo(MUD, To, MemorySSA::End);
      // Moving MUD out of Accs in the moveTo above may delete Accs, so we
      // need to retrieve it again.
      Accs = MSSA->getWritableBlockAccesses(From);
      MUD = NextMUD;
    } while (MUD);
  }

  // If all accesses were moved and only a trivial Phi remains, we try to
  // remove that Phi. This is needed when From is going to be deleted.
  auto *Defs = MSSA->getWritableBlockDefs(From);
  if (Defs && !Defs->empty())
    if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
      tryRemoveTrivialPhi(Phi);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getUniquePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}
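
// Example: a sketch of keeping MemorySSA in sync when a block is merged into
// its unique predecessor. Illustrative only; `BB`, `Pred`, `FirstMoved`, and
// `Updater` are hypothetical names from the caller, and the call happens
// after the instructions have been spliced into Pred but before BB is erased.
//
//   // IR transform: BB's instructions were spliced into Pred; FirstMoved is
//   // the first spliced instruction, now living in Pred.
//   Updater.moveAllAfterMergeBlocks(BB, Pred, FirstMoved);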

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}
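
// For illustration (block names are hypothetical), a MemoryPhi such as
//   2 = MemoryPhi({if.then,1},{if.else,1})
// is trivial: both incoming values are access 1, so onlySingleValue returns
// that access and the phi can be replaced by it and removed.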

void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // We currently only support the case of removing a single incoming edge
    // when identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}
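
// Example: a sketch of the update after splitting predecessors, in the style
// of SplitBlockPredecessors. Illustrative only; `BB`, `NewBB`, `Preds`, and
// `Updater` are hypothetical names from the caller.
//
//   // IR transform: NewBB was created, the edges from the blocks in Preds
//   // were redirected into NewBB, and NewBB unconditionally branches to BB.
//   Updater.wireOldPredecessorsToNewImmediatePredecessor(
//       BB, NewBB, Preds, /*IdenticalEdgesWereMerged=*/true);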

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access.
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we prefer that users call
    // removeMemoryAccess on those phi nodes, because doing it here would be
    // O(N^3).
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order in
  // which we do things here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}
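
// Example: a sketch of deleting a dead store while keeping MemorySSA in sync.
// Illustrative only; `DeadSI` and `Updater` are hypothetical names from the
// caller. The access must be removed before the instruction itself:
//
//   if (MemoryAccess *MA = MSSA->getMemoryAccess(DeadSI))
//     Updater.removeMemoryAccess(MA);
//   DeadSI->eraseFromParent();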

void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references of all accesses in BB.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}
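
// Example: a sketch of deleting an unreachable region. Illustrative only;
// `DeadBlocks` and `Updater` are hypothetical names, and the caller erases
// the blocks from the function afterwards (e.g. via DeleteDeadBlocks):
//
//   SmallSetVector<BasicBlock *, 8> DeadBlocks;
//   // ... populate DeadBlocks with the blocks to delete ...
//   Updater.removeBlocks(DeadBlocks); // Update MemorySSA first.
//   DeleteDeadBlocks(DeadBlocks.getArrayRef());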

void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}

void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}
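
// Example: a sketch of the intended calling pattern for transforms that turn
// the tail of a block into unreachable code. Illustrative only; `I` is the
// hypothetical first instruction being replaced:
//
//   Updater.changeToUnreachable(I); // Update MemorySSA before mutating IR.
//   // ... then replace I and everything after it with an UnreachableInst ...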

void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
                                                         const BasicBlock *To) {
  const BasicBlock *BB = BI->getParent();
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Succ : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Succ);
    if (Succ != To)
      if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
        MPhi->unorderedDeleteIncomingBlock(BB);
        UpdatedPHIs.push_back(MPhi);
      }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}
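
// Example: a sketch of folding a conditional branch whose condition became
// constant. Illustrative only; `BI` and `TakenBB` are hypothetical names for
// the branch and the successor that remains:
//
//   Updater.changeCondBranchToUnconditionalTo(BI, TakenBB);
//   // ... then rewrite BI to an unconditional `br label %TakenBB` ...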

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}
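
// Example: a sketch of registering a cloned store with MemorySSA.
// Illustrative only; `SI` is a hypothetical original store and `NewSI` its
// clone, already inserted immediately after SI in the IR:
//
//   MemoryUseOrDef *OldAcc = MSSA->getMemoryAccess(SI);
//   // NewSI is defined by SI's access and placed right after it.
//   MemoryUseOrDef *NewAcc =
//       Updater.createMemoryAccessAfter(NewSI, OldAcc, OldAcc);
//
// Note that the createMemoryAccess* functions do not rename uses: any
// accesses that should now be defined by NewAcc must be fixed up by the
// caller.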