//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (IE we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
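// For illustration (a hypothetical CFG, not taken from a test): given edges
//   A -> B, A -> C, B -> D, C -> D
// asking for the previous definition at the top of D visits B and C; if both
// paths reach the same definition in A, no MemoryPhi is needed in D, whereas
// two different incoming definitions force a MemoryPhi at the join in D.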
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end())
    return Cached->second;

  // If this method is called from an unreachable block, return LoE.
  if (!MSSA->DT->isReachableFromEntry(BB))
    return MSSA->getLiveOnEntryDef();

  if (BasicBlock *Pred = BB->getUniquePredecessor()) {
    VisitedBlocks.insert(BB);
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    bool UniqueIncomingAccess = true;
    MemoryAccess *SingleAccess = nullptr;
    for (auto *Pred : predecessors(BB)) {
      if (MSSA->DT->isReachableFromEntry(Pred)) {
        auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
        if (!SingleAccess)
          SingleAccess = IncomingAccess;
        else if (IncomingAccess != SingleAccess)
          UniqueIncomingAccess = false;
        PhiOps.push_back(IncomingAccess);
      } else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());
    }

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet, that's okay
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi
    if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
      // A concrete Phi only exists if we created an empty one to break a cycle.
      if (Phi) {
        assert(Phi->operands().empty() && "Expected empty Phi");
        Phi->replaceAllUsesWith(SingleAccess);
        removeMemoryAccess(Phi);
      }
      Result = SingleAccess;
    } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a
// single definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, we have to walk the all-access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of block BB, returning the last definition there if
// one exists; if the block has none, it recurses into the predecessors.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}
// Recurse over a set of phi uses to eliminate the trivial ones.
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses)
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
      tryRemoveTrivialPhi(UsePhi);
  return Res;
}

// Eliminate trivial phis.
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far
    if (Op == Phi || Op == Same)
      continue;
    // not the same, return the phi since it's not eliminatable by us
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));

  // In cases without unreachable blocks, because uses do not create new
  // may-defs, there are only two cases:
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming
  // work to do.

  // In cases with unreachable blocks, where the unnecessary Phis were
  // optimized out, adding the Use may re-insert those Phis. Hence, when
  // inserting Uses outside of the MSSA creation process, and new Phis were
  // added, rename all uses if we are asked.

  if (!RenameUses && !InsertedPHIs.empty()) {
    auto *Defs = MSSA->getBlockDefs(MU->getBlock());
    (void)Defs;
    assert((!Defs || (++Defs->begin() == Defs->end())) &&
           "Block may have only a Phi or no defs");
  }

  if (RenameUses && InsertedPHIs.size()) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MU->getBlock();

    if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
      MemoryAccess *FirstDef = &*Defs->begin();
      // Convert to incoming value if it's a memorydef. A phi *is* already an
      // incoming value.
      if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
        FirstDef = MD->getDefiningAccess();

      MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
    }
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs)
      if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  }
}
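
// Example usage (an illustrative sketch under assumed names, in the style of
// passes like LICM): after cloning a load `NewLI` into block `BB`, a caller
// could create and wire up its MemoryUse with:
//   MemoryAccess *NewMA = Updater.createMemoryAccessInBB(
//       NewLI, /*Definition=*/nullptr, BB, MemorySSA::BeforeTerminator);
//   Updater.insertUse(cast<MemoryUse>(NewMA), /*RenameUses=*/true);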

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
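// For example (hypothetical operands): if MP has consecutive incoming entries
// {(BB, d1), (BB, d2), (Other, d3)} because two edges come from BB, both BB
// entries are set to NewDef, giving {(BB, NewDef), (BB, NewDef), (Other, d3)}.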
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace every operand whose incoming block is BB with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
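// For example (an illustrative sketch): when a new MemoryDef d2 is inserted
// after an existing d1, d2's defining access becomes d1, the defs and phis
// that previously used d1 below the insertion point are rewritten to use d2,
// and new phis are placed where the updated paths merge.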
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = false;
  if (DefBefore->getBlock() == MD->getBlock() &&
      !(isa<MemoryPhi>(DefBefore) &&
        llvm::is_contained(InsertedPHIs, DefBefore)))
    DefBeforeSameBlock = true;

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis
  if (DefBeforeSameBlock) {
    DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      User *Usr = U.getUser();
      return !isa<MemoryUse>(Usr) && Usr != MD;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
    });
  }

  // and that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());

  SmallSet<WeakVH, 8> ExistingPhis;

  // Remember the index where we may insert new phis.
  unsigned NewPhiIndex = InsertedPHIs.size();
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create,
    // it would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases we can skip this work, such as when we have
    // a single successor, and only used a straight line of single pred blocks
    // backwards to find the def. To make that work, we'd have to track whether
    // getDefRecursive only ever used the single predecessor case. These types
    // of paths also only exist in between CFG simplifications.

    // If this is the first def in the block and this insert is in an arbitrary
    // place, compute IDF and place phis.
    SmallPtrSet<BasicBlock *, 2> DefiningBlocks;

    // If this is the last Def in the block, also compute IDF based on MD,
    // since this may be a new Def added, and we may need additional Phis.
    auto Iter = MD->getDefsIterator();
    ++Iter;
    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
    if (Iter == IterEnd)
      DefiningBlocks.insert(MD->getBlock());

    for (const auto &VH : InsertedPHIs)
      if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
        DefiningBlocks.insert(RealPHI->getBlock());
    ForwardIDFCalculator IDFs(*MSSA->DT);
    SmallVector<BasicBlock *, 32> IDFBlocks;
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
    for (auto *BBIDF : IDFBlocks) {
      auto *MPhi = MSSA->getMemoryAccess(BBIDF);
      if (!MPhi) {
        MPhi = MSSA->createMemoryPhi(BBIDF);
        NewInsertedPHIs.push_back(MPhi);
      } else {
        ExistingPhis.insert(MPhi);
      }
      // Add the phis created into the IDF blocks to NonOptPhis, so they are
      // not optimized out as trivial by the call to getPreviousDefFromEnd
      // below. Once they are complete, all these Phis are added to the
      // FixupList, and removed from NonOptPhis inside fixupDefs(). Existing
      // Phis in IDF may need fixing as well, and potentially be trivial
      // before this insertion, hence add all IDF Phis. See PR43044.
      NonOptPhis.insert(MPhi);
    }
    for (auto &MPhi : NewInsertedPHIs) {
      auto *BBIDF = MPhi->getBlock();
      for (auto *Pred : predecessors(BBIDF)) {
        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
        MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
      }
    }

    // Re-take the index where we're adding the new phis, because the above
    // call to getPreviousDefFromEnd may have inserted into InsertedPHIs.
    NewPhiIndex = InsertedPHIs.size();
    for (auto &MPhi : NewInsertedPHIs) {
      InsertedPHIs.push_back(&*MPhi);
      FixupList.push_back(&*MPhi);
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more, that are already
  // minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them.
    FixupList.append(InsertedPHIs.begin() + StartingPHISize, InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
    // Existing Phi blocks may need renaming too, if an access was previously
    // optimized and the inserted Defs "covers" the Optimized value.
    for (auto &MP : ExistingPhis) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}
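
// Example usage (an illustrative sketch under assumed names, not a prescribed
// API contract): a pass inserting a new store could keep MemorySSA current
// with something like:
//   MemorySSAUpdater Updater(MSSA);
//   MemoryAccess *NewMA = Updater.createMemoryAccessInBB(
//       NewStore, /*Definition=*/nullptr, NewStore->getParent(),
//       MemorySSA::BeforeTerminator);
//   Updater.insertDef(cast<MemoryDef>(NewMA), /*RenameUses=*/true);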

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed, unmark it so it can be optimized.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually
        // dominate it
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we
          // already processed. FIXME: Double check this
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    tryRemoveTrivialPhi(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    tryRemoveTrivialPhi(MPhi);
  }
}

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
                                                  const ValueToValueMapTy &VMap,
                                                  PhiToDefMap &MPhiMap,
                                                  bool CloneWasSimplified,
                                                  MemorySSA *MSSA) {
  MemoryAccess *InsnDefining = MA;
  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
    if (!MSSA->isLiveOnEntryDef(DefMUD)) {
      Instruction *DefMUDI = DefMUD->getMemoryInst();
      assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
      if (Instruction *NewDefMUDI =
              cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
        InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
        if (!CloneWasSimplified)
          assert(InsnDefining && "Defining instruction cannot be nullptr.");
        else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
          // The clone was simplified, it's no longer a MemoryDef, look up.
          auto DefIt = DefMUD->getDefsIterator();
          // Since simplified clones only occur in single block cloning, a
          // previous definition must exist, otherwise NewDefMUDI would not
          // have been found in VMap.
          assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
                 "Previous def must exist");
          InsnDefining = getNewDefiningAccessForClone(
              &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
        }
      }
    }
  } else {
    MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
    if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
      InsnDefining = NewDefPhi;
  }
  assert(InsnDefining && "Defining instruction cannot be nullptr.");
  return InsnDefining;
}

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap,
                                        bool CloneWasSimplified) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      // Also in LoopRotate, even when it's an instruction, due to it being
      // simplified, it may be a Use rather than a Def, so we cannot use MUD
      // as the template. Calls coming from updateForClonedBlockIntoPred
      // ensure this.
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn,
            getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
                                         MPhiMap, CloneWasSimplified, MSSA),
            /*Template=*/CloneWasSimplified ? nullptr : MUD,
            /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
        if (NewUseOrDef)
          MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will
  // be replaced with the unique value.
  tryRemoveTrivialPhi(NewMPhi);
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
    if (auto *SingleAccess = onlySingleValue(NewPhi)) {
      MPhiMap[Phi] = SingleAccess;
      removeMemoryAccess(NewPhi);
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB, and so also dominate P1.
  // Defs from BB being used in BB will be replaced with the cloned defs from
  // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
  // Instructions cloned into the predecessor are in practice sometimes
  // simplified, so disable the use of the template, and create an access
  // from scratch.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

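// Example usage (an illustrative sketch, assuming the CFG changes have
// already been applied to both the IR and the DominatorTree): a pass could
// inform MemorySSA of a batch of edge changes with:
//   SmallVector<DominatorTree::UpdateType, 2> CFGUpdates;
//   CFGUpdates.push_back({DominatorTree::Insert, A, B});
//   CFGUpdates.push_back({DominatorTree::Delete, A, C});
//   Updater.applyUpdates(CFGUpdates, DT);
// where Updater is a MemorySSAUpdater constructed over the current MSSA.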
void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> DeleteUpdates;
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else {
      DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    }
  }

  if (!DeleteUpdates.empty()) {
    SmallVector<CFGUpdate, 0> Empty;
    // Deletes are applied reversed (as insertions), because this CFGView is
    // pretending the deletes did not happen yet, hence the edges still exist.
    DT.applyUpdates(Empty, RevDeleteUpdates);

    // Note: the MSSA update below doesn't distinguish between a GD with
    // (RevDelete,false) and (Delete, true), but this matters for the DT
    // updates above; for "children" purposes they are equivalent; but the
    // updates themselves convey the desired update, used inside DT only.
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, DT, &GD);
    // Update DT to redelete edges; this matches the real CFG so we can
    // perform the standard update without a postview of the CFG.
    DT.applyUpdates(DeleteUpdates);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges.
  for (auto &Update : DeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
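  // It walks up the dominator tree: for a block with multiple predecessors,
  // the last Def reaching it is whatever its immediate dominator produced.
  // E.g., in a hypothetical diamond A -> B, A -> C, B/C -> D with no defs in
  // B or C, the last Def reaching D is the last Def in A (D's IDom).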
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
        Pred = Pi;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted,
        // its DT is invalidated. Return LoE as its last def. This will be
        // added to MemoryPhi node, and later deleted when the block is
        // deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        // BB can be unreachable though, return LoE if that is the case.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        BB = Pred;
      }
    };
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do
  // not include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered
  // sets, we lose the information of multiple edges possibly existing
  // between two blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep an unordered set for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not
      // invalidate iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  SmallVector<WeakVH, 8> InsertedPhis;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated
    // entries if an edge is added from the same predecessor to two different
    // blocks, and this does happen in rotate. Note that the map needs to be
    // updated when deleting non-necessary phis below, if the phi is in the
    // map, by replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all
        // uses of NewPhi with the definition coming from all predecessors
        // (DefP1), before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for
      // all other predecessors. Since AddedBlockSet and PrevBlockSet are
      // ordered sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known
    // predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  tryRemoveTrivialPhis(InsertedPhis);
  // Create the set of blocks that now have a definition. We'll use this to
  // compute IDF and add Phis there next.
  SmallVector<BasicBlock *, 8> BlocksToProcess;
  for (auto &VH : InsertedPhis)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      BlocksToProcess.push_back(MPhi->getBlock());

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT, GD);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);

    SmallSetVector<MemoryPhi *, 4> PhisToFill;
    // First create all needed Phis.
    for (auto *BBIDF : IDFBlocks)
      if (!MSSA->getMemoryAccess(BBIDF)) {
        auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
        InsertedPhis.push_back(IDFPhi);
        PhisToFill.insert(IDFPhi);
      }
    // Then update or insert their correct incoming values.
    for (auto *BBIDF : IDFBlocks) {
      auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
      assert(IDFPhi && "Phi must exist");
      if (!PhisToFill.count(IDFPhi)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip
        // some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
  tryRemoveTrivialPhis(InsertedPhis);
}
Daniel Berlinae6b8b62017-01-28 01:35:02 +00001159// Move What before Where in the MemorySSA IR.
Daniel Berlin9d8a3352017-01-30 11:35:39 +00001160template <class WhereType>
Daniel Berlinae6b8b62017-01-28 01:35:02 +00001161void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
Daniel Berlin9d8a3352017-01-30 11:35:39 +00001162 WhereType Where) {
Zhaoshi Zheng43af17b2018-04-09 20:55:37 +00001163 // Mark MemoryPhi users of What not to be optimized.
1164 for (auto *U : What->users())
George Burgess IVe7cdb7e2018-07-12 21:56:31 +00001165 if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
Zhaoshi Zheng43af17b2018-04-09 20:55:37 +00001166 NonOptPhis.insert(PhiUser);
1167
Daniel Berlinae6b8b62017-01-28 01:35:02 +00001168 // Replace all our users with our defining access.
1169 What->replaceAllUsesWith(What->getDefiningAccess());
1170
1171 // Let MemorySSA take care of moving it around in the lists.
1172 MSSA->moveTo(What, BB, Where);
1173
1174 // Now reinsert it into the IR and do whatever fixups needed.
1175 if (auto *MD = dyn_cast<MemoryDef>(What))
Alina Sbirlea1a3fdaf2019-08-19 18:57:40 +00001176 insertDef(MD, /*RenameUses=*/true);
Daniel Berlinae6b8b62017-01-28 01:35:02 +00001177 else
Alina Sbirlea1a3fdaf2019-08-19 18:57:40 +00001178 insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);
Zhaoshi Zheng43af17b2018-04-09 20:55:37 +00001179
1180 // Clear dangling pointers. We added all MemoryPhi users, but not all
1181 // of them are removed by fixupDefs().
1182 NonOptPhis.clear();
Daniel Berlinae6b8b62017-01-28 01:35:02 +00001183}
Daniel Berlin9d8a3352017-01-30 11:35:39 +00001184
// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

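// Usage sketch for moveBefore/moveAfter: when a pass sinks a store `A` so it
// executes right after another memory instruction `B`, the accesses can be
// kept in sync as below (a minimal sketch; `A`, `B`, and `MSSAU` are
// placeholder names, not part of this file):
//
//   MemorySSA *MSSA = MSSAU.getMemorySSA();
//   MSSAU.moveAfter(MSSA->getMemoryAccess(A), MSSA->getMemoryAccess(B));
//
// Both helpers funnel into the moveTo template above, which first re-links
// the moved access's users to its defining access and then re-inserts it.
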
void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
    return moveTo(What, BB, Where);

  if (auto *Where = MSSA->getMemoryAccess(BB->getTerminator()))
    return moveBefore(What, Where);
  else
    return moveTo(What, BB, MemorySSA::InsertionPlace::End);
}

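// Usage sketch for moveToPlace: hoisting an access into a loop preheader,
// letting the logic above decide whether the terminator's own access must be
// skipped (a minimal sketch; `AccI` and `Preheader` are placeholders):
//
//   MSSAU.moveToPlace(AccI, Preheader,
//                     MemorySSA::InsertionPlace::BeforeTerminator);
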
// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  assert(Start->getParent() == To && "Incorrect Start instruction");
  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (FirstInNew) {
    auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
    do {
      auto NextIt = ++MUD->getIterator();
      MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                    ? nullptr
                                    : cast<MemoryUseOrDef>(&*NextIt);
      MSSA->moveTo(MUD, To, MemorySSA::End);
      // Moving MUD from Accs in the moveTo above may delete Accs, so we need
      // to retrieve it again.
      Accs = MSSA->getWritableBlockAccesses(From);
      MUD = NextMUD;
    } while (MUD);
  }

  // If all accesses were moved and only a trivial Phi remains, we try to
  // remove that Phi. This is needed when From is going to be deleted.
  auto *Defs = MSSA->getWritableBlockDefs(From);
  if (Defs && !Defs->empty())
    if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
      tryRemoveTrivialPhi(Phi);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getUniquePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

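// Usage sketch distinguishing the two helpers above (minimal, with
// placeholder names), called after the IR-level transform has already moved
// the instructions:
//
//   // Tail of From, starting at Start, was spliced into the empty block To:
//   MSSAU.moveAllAfterSpliceBlocks(From, To, Start);
//
//   // From was merged into its unique predecessor To:
//   MSSAU.moveAllAfterMergeBlocks(From, To, Start);
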
void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // We currently only support the case of removing a single incoming edge
    // when identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}

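// Usage sketch: block-splitting utilities such as SplitBlockPredecessors
// create a new block `New`, redirect the edges from `Preds` into it, and then
// re-wire MemorySSA roughly as below (a minimal sketch; the names are
// placeholders):
//
//   MSSAU.wireOldPredecessorsToNewImmediatePredecessor(
//       Old, New, Preds, /*IdenticalEdgesWereMerged=*/true);
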
void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access.
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we prefer that users call
    // removeMemoryAccess on those phi nodes themselves, because doing it here
    // would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    assert(NewDefTarget != MA && "Going into an infinite loop");
    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The call to removeFromLists below will destroy MA, so we can't change the
  // order we are doing things here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}

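// Usage sketch: when a transform deletes a memory instruction, its access
// must be removed before the instruction itself is erased (a minimal sketch;
// `I` is a placeholder for the dying instruction):
//
//   MSSAU.removeMemoryAccess(I); // Uses are re-pointed as above.
//   I->eraseFromParent();
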
void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references of all accesses in BB.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}

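// Usage sketch: when deleting an unreachable region, drop the accesses first
// and only then erase the IR blocks (a minimal sketch; `DeadBBs` is a
// placeholder set of blocks already proven dead):
//
//   MSSAU.removeBlocks(DeadBBs);   // MemorySSA cleanup first...
//   for (BasicBlock *BB : DeadBBs)
//     BB->eraseFromParent();       // ...then delete the IR.
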
void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}

void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

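// Usage sketch: a pass that is about to cut a block short at instruction `I`
// and terminate it with `unreachable` would first strip the affected
// accesses (a minimal sketch; `I` is a placeholder):
//
//   MSSAU.changeToUnreachable(I);
//   // ...then rewrite the IR terminator itself.
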
void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
                                                         const BasicBlock *To) {
  const BasicBlock *BB = BI->getParent();
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Succ : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Succ);
    if (Succ != To)
      if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
        MPhi->unorderedDeleteIncomingBlock(BB);
        UpdatedPHIs.push_back(MPhi);
      }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

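// Usage sketch: after cloning a memory instruction into block `BB`, register
// an access for the clone at a chosen position (a minimal sketch; `Clone`
// and `Definition` are placeholders for the new instruction and the access
// it should be defined by):
//
//   MemoryAccess *NewMA = MSSAU.createMemoryAccessInBB(
//       Clone, Definition, BB, MemorySSA::Beginning);
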
MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

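// Usage sketch: inserting a new store right before an existing access and
// reusing that access's defining access (a minimal sketch; `NewSI` and
// `InsertPt` are placeholders):
//
//   MemoryUseOrDef *NewMA = MSSAU.createMemoryAccessBefore(
//       NewSI, InsertPt->getDefiningAccess(), InsertPt);
//
// createMemoryAccessAfter below is the symmetric helper for insertion after
// InsertPt.
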
MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}