//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
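//
// Illustrative usage sketch (MSSA and NewStore are hypothetical names in a
// calling pass): after inserting a new store into the IR, a pass creates the
// matching access and lets this updater repair MemorySSA incrementally
// instead of recomputing the analysis:
//
//   MemorySSAUpdater Updater(MSSA);
//   MemoryAccess *NewAcc = Updater.createMemoryAccessInBB(
//       NewStore, /*Definition=*/nullptr, NewStore->getParent(),
//       MemorySSA::End);
//   Updater.insertDef(cast<MemoryDef>(NewAcc), /*RenameUses=*/true);
//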
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (IE we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
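// For example, in a diamond CFG where only one arm contains a MemoryDef, the
// two accesses reaching the merge block differ (the def from that arm vs.
// whatever reaches the join through the other arm), so the phi placed there
// is kept. If neither arm wrote memory, both incoming accesses would be
// identical and the marker phi would be folded away by tryRemoveTrivialPhi.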
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end()) {
    return Cached->second;
  }

  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    bool UniqueIncomingAccess = true;
    MemoryAccess *SingleAccess = nullptr;
    for (auto *Pred : predecessors(BB)) {
      if (MSSA->DT->isReachableFromEntry(Pred)) {
        auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
        if (!SingleAccess)
          SingleAccess = IncomingAccess;
        else if (IncomingAccess != SingleAccess)
          UniqueIncomingAccess = false;
        PhiOps.push_back(IncomingAccess);
      } else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());
    }

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet, that's okay
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi
    if (Result == Phi && UniqueIncomingAccess && SingleAccess)
      Result = SingleAccess;
    else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a
// single definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of the block.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}
// Recurse over a set of phi uses to eliminate the trivial ones
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses)
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
      tryRemoveTrivialPhi(UsePhi);
  return Res;
}

// Eliminate trivial phis
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
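// For instance, once b = phi(a, b) is folded to a, a user such as
// c = phi(b, c) becomes c = phi(a, c), which is itself trivial; recursePhi
// then walks the users of the replacement value so such chains collapse
// completely.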
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far
    if (Op == Phi || Op == Same)
      continue;
    // not the same, return the phi since it's not eliminatable by us
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));
  // In cases without unreachable blocks, because uses do not create new
  // may-defs, there are only two cases:
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming work
  // to do.

  // In cases with unreachable blocks, where the unnecessary Phis were
  // optimized out, adding the Use may re-insert those Phis. Hence, when
  // inserting Uses outside of the MSSA creation process, and new Phis were
  // added, rename all uses if we are asked.

  if (!RenameUses && !InsertedPHIs.empty()) {
    auto *Defs = MSSA->getBlockDefs(MU->getBlock());
    (void)Defs;
    assert((!Defs || (++Defs->begin() == Defs->end())) &&
           "Block may have only a Phi or no defs");
  }

  if (RenameUses && InsertedPHIs.size()) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MU->getBlock();

    if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
      MemoryAccess *FirstDef = &*Defs->begin();
      // Convert to incoming value if it's a memorydef. A phi *is* already an
      // incoming value.
      if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
        FirstDef = MD->getDefiningAccess();

      MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
    }
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs)
      if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  }
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace the incoming value of every operand whose incoming block is BB
  // with the new defining access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
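// For example, inserting a store into one arm of a diamond that previously
// contained no memory defs makes a MemoryPhi necessary at the join block; the
// steps below create that phi (via the IDF computation) and then rewrite the
// defs and uses now reached by the new store so they point at it, or at the
// store itself, instead of at the definition that was live before.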
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = false;
  if (DefBefore->getBlock() == MD->getBlock() &&
      !(isa<MemoryPhi>(DefBefore) &&
        std::find(InsertedPHIs.begin(), InsertedPHIs.end(), DefBefore) !=
            InsertedPHIs.end()))
    DefBeforeSameBlock = true;

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis
  if (DefBeforeSameBlock) {
    DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      User *Usr = U.getUser();
      return !isa<MemoryUse>(Usr) && Usr != MD;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
    });
  }

  // and that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());

  // Remember the index where we may insert new phis.
  unsigned NewPhiIndex = InsertedPHIs.size();
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create, it
    // would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases we can skip this work, such as when we have a
    // single successor, and only used a straight line of single pred blocks
    // backwards to find the def. To make that work, we'd have to track whether
    // getDefRecursive only ever used the single predecessor case. These types
    // of paths also only exist in between CFG simplifications.

    // If this is the first def in the block and this insert is in an arbitrary
    // place, compute IDF and place phis.
    auto Iter = MD->getDefsIterator();
    ++Iter;
    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
    if (Iter == IterEnd) {
      SmallPtrSet<BasicBlock *, 2> DefiningBlocks;
      DefiningBlocks.insert(MD->getBlock());
      for (const auto &VH : InsertedPHIs)
        if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
          DefiningBlocks.insert(RealPHI->getBlock());
      ForwardIDFCalculator IDFs(*MSSA->DT);
      SmallVector<BasicBlock *, 32> IDFBlocks;
      IDFs.setDefiningBlocks(DefiningBlocks);
      IDFs.calculate(IDFBlocks);
      SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
      for (auto *BBIDF : IDFBlocks) {
        auto *MPhi = MSSA->getMemoryAccess(BBIDF);
        if (!MPhi) {
          MPhi = MSSA->createMemoryPhi(BBIDF);
          NewInsertedPHIs.push_back(MPhi);
        }
        // Add the phis created into the IDF blocks to NonOptPhis, so they are
        // not optimized out as trivial by the call to getPreviousDefFromEnd
        // below. Once they are complete, all these Phis are added to the
        // FixupList, and removed from NonOptPhis inside fixupDefs(). Existing
        // Phis in IDF may need fixing as well, and potentially be trivial
        // before this insertion, hence add all IDF Phis. See PR43044.
        NonOptPhis.insert(MPhi);
      }
      for (auto &MPhi : NewInsertedPHIs) {
        auto *BBIDF = MPhi->getBlock();
        for (auto *Pred : predecessors(BBIDF)) {
          DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
          MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef),
                            Pred);
        }
      }

      // Re-take the index where we're adding the new phis, because the above
      // call to getPreviousDefFromEnd, may have inserted into InsertedPHIs.
      NewPhiIndex = InsertedPHIs.size();
      for (auto &MPhi : NewInsertedPHIs) {
        InsertedPHIs.push_back(&*MPhi);
        FixupList.push_back(&*MPhi);
      }
    }
    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more, that are already minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them
    FixupList.append(InsertedPHIs.begin() + StartingPHISize, InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will become
    // the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed, so remove it from the set of Phis
    // excluded from optimization.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually dominate
        // it
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we already
          // processed. FIXME: Double check this
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    tryRemoveTrivialPhi(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    tryRemoveTrivialPhi(MPhi);
  }
}

static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
                                                  const ValueToValueMapTy &VMap,
                                                  PhiToDefMap &MPhiMap,
                                                  bool CloneWasSimplified,
                                                  MemorySSA *MSSA) {
  MemoryAccess *InsnDefining = MA;
  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
    if (!MSSA->isLiveOnEntryDef(DefMUD)) {
      Instruction *DefMUDI = DefMUD->getMemoryInst();
      assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
      if (Instruction *NewDefMUDI =
              cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
        InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
        if (!CloneWasSimplified)
          assert(InsnDefining && "Defining instruction cannot be nullptr.");
        else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
          // The clone was simplified, it's no longer a MemoryDef, look up.
          auto DefIt = DefMUD->getDefsIterator();
          // Since simplified clones only occur in single block cloning, a
          // previous definition must exist, otherwise NewDefMUDI would not
          // have been found in VMap.
          assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
                 "Previous def must exist");
          InsnDefining = getNewDefiningAccessForClone(
              &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
        }
      }
    }
  } else {
    MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
    if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
      InsnDefining = NewDefPhi;
  }
  assert(InsnDefining && "Defining instruction cannot be nullptr.");
  return InsnDefining;
}

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap,
                                        bool CloneWasSimplified) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      // Also in LoopRotate, even when it's an instruction, due to it being
      // simplified, it may be a Use rather than a Def, so we cannot use MUD as
      // template. Calls coming from updateForClonedBlockIntoPred, ensure this.
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn,
            getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
                                         MPhiMap, CloneWasSimplified, MSSA),
            /*Template=*/CloneWasSimplified ? nullptr : MUD,
            /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
        if (NewUseOrDef)
          MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
  // replaced with the unique value.
  tryRemoveTrivialPhi(NewMPhi);
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB and therefore also dominate
  // P1. Defs from BB being used in BB will be replaced with the cloned defs
  // from VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
  // Instructions cloned into the predecessor are in practice sometimes
  // simplified, so disable the use of the template, and create an access from
  // scratch.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
  }

  if (!RevDeleteUpdates.empty()) {
    // Update for inserted edges: use newDT and snapshot CFG as if deletes had
    // not occurred.
    // FIXME: This creates a new DT, so it's more expensive to do mixed
    // deletes/inserts vs just inserts. We can do an incremental update on the
    // DT to revert deletes, then re-delete the edges. Teaching DT to do this
    // is part of a pending cleanup.
    DominatorTree NewDT(DT, RevDeleteUpdates);
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, NewDT, &GD);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges
  for (auto &Update : RevDeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

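// Illustrative case for the overload below: when a new edge Pred -> BB is
// added and BB already had other predecessors, BB may now need a MemoryPhi,
// or its existing phi needs an extra incoming value taken from the last
// definition reaching the end of Pred. Accesses that were dominated by a
// single definition before the update may now be reached by several, so
// their defining accesses are recomputed as well.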
void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
        Pred = Pair.second;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT is invalidated. Return LoE as its last def. This will be added to
        // MemoryPhi node, and later deleted when the block is deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        // BB can be unreachable though, return LoE if that is the case.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        BB = Pred;
      }
    };
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
  // include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered sets,
  // we lose the information of multiple edges possibly existing between two
  // blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep an unordered set for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
      BasicBlock *Pi = Pair.second;
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not invalidate
      // iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  SmallVector<WeakVH, 8> InsertedPhis;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated entries
    // if an edge is added from the same predecessor to two different blocks,
    // and this does happen in rotate. Note that the map needs to be updated
    // when deleting non-necessary phis below, if the phi is in the map by
    // replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all uses
        // of NewPhi with the definition coming from all predecessors (DefP1),
        // before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for all
      // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
      // sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exists");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  tryRemoveTrivialPhis(InsertedPhis);
  // Create the set of blocks that now have a definition. We'll use this to
  // compute IDF and add Phis there next.
  SmallVector<BasicBlock *, 8> BlocksToProcess;
  for (auto &VH : InsertedPhis)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      BlocksToProcess.push_back(MPhi->getBlock());

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT, GD);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);

    SmallSetVector<MemoryPhi *, 4> PhisToFill;
    // First create all needed Phis.
    for (auto *BBIDF : IDFBlocks)
      if (!MSSA->getMemoryAccess(BBIDF)) {
        auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
        InsertedPhis.push_back(IDFPhi);
        PhisToFill.insert(IDFPhi);
      }
    // Then update or insert their correct incoming values.
    for (auto *BBIDF : IDFBlocks) {
      auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
      assert(IDFPhi && "Phi must exist");
      if (!PhisToFill.count(IDFPhi)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        for (auto &Pair : children<GraphDiffInvBBPair>({GD, BBIDF})) {
          BasicBlock *Pi = Pair.second;
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
        }
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
  tryRemoveTrivialPhis(InsertedPhis);
}

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD, /*RenameUses=*/true);
  else
    insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  return moveTo(What, BB, Where);
}

Alina Sbirlea0f533552018-07-11 22:11:46 +00001151// All accesses in To used to be in From. Move to end and update access lists.
1152void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
1153 Instruction *Start) {
1154
1155 MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
1156 if (!Accs)
1157 return;
1158
1159 MemoryAccess *FirstInNew = nullptr;
1160 for (Instruction &I : make_range(Start->getIterator(), To->end()))
1161 if ((FirstInNew = MSSA->getMemoryAccess(&I)))
1162 break;
1163 if (!FirstInNew)
1164 return;
1165
1166 auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
1167 do {
1168 auto NextIt = ++MUD->getIterator();
1169 MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
1170 ? nullptr
1171 : cast<MemoryUseOrDef>(&*NextIt);
1172 MSSA->moveTo(MUD, To, MemorySSA::End);
1173 // Moving MUD from Accs in the moveTo above, may delete Accs, so we need to
1174 // retrieve it again.
1175 Accs = MSSA->getWritableBlockAccesses(From);
1176 MUD = NextMUD;
1177 } while (MUD);
1178}
1179
1180void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
1181 BasicBlock *To,
1182 Instruction *Start) {
1183 assert(MSSA->getBlockAccesses(To) == nullptr &&
1184 "To block is expected to be free of MemoryAccesses.");
1185 moveAllAccesses(From, To, Start);
1186 for (BasicBlock *Succ : successors(To))
1187 if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
1188 MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1189}
1190
1191void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
1192 Instruction *Start) {
1193 assert(From->getSinglePredecessor() == To &&
1194 "From block is expected to have a single predecessor (To).");
1195 moveAllAccesses(From, To, Start);
1196 for (BasicBlock *Succ : successors(From))
1197 if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
1198 MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1199}
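
// Illustrative usage sketch (hypothetical names, not part of this file): a
// transform that merges a block `BB` into its single predecessor `Pred` at the
// IR level, where `FirstInst` is the first of BB's former instructions now
// living in Pred, can update MemorySSA with:
//
//   MSSAU.moveAllAfterMergeBlocks(BB, Pred, FirstInst);
//
// moveAllAfterSpliceBlocks covers the symmetric case where the tail of `From`
// starting at `Start` has been spliced into a new, access-free block `To`.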

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}
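
// As an illustration (schematic MemorySSA notation), a phi of the form
//   2 = MemoryPhi({if.then,1},{if.else,1})
// has the single underlying value `1`, while
//   3 = MemoryPhi({if.then,1},{if.else,2})
// does not, so onlySingleValue returns nullptr for it.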

void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // We currently only support the case of removing a single incoming edge
    // when identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}
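
// Illustrative usage sketch (hypothetical names, not part of this file): after
// an IR-level transform inserts `NewBB` as the new immediate predecessor of
// `OldBB`, rerouting the edges from the blocks in `Preds` through `NewBB`,
// MemorySSA can be updated with:
//
//   MSSAU.wireOldPredecessorsToNewImmediatePredecessor(
//       OldBB, NewBB, Preds, /*IdenticalEdgesWereMerged=*/true);
//
// This mirrors what utilities in the style of SplitBlockPredecessors need to
// do for the MemoryPhi of the block being split.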

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access.
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we prefer that users call removeMemoryAccess
    // on those phi nodes, because doing it here would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The calls below will destroy MA, so we can't change the order in which we
  // do things here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}
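
// Illustrative usage sketch (hypothetical names, not part of this file): a
// DSE-style pass that has proven a store dead removes its MemoryDef before
// erasing the instruction from the IR:
//
//   if (MemoryAccess *DeadMA = MSSA->getMemoryAccess(DeadStore))
//     MSSAU.removeMemoryAccess(DeadMA, /*OptimizePhis=*/true);
//   DeadStore->eraseFromParent();
//
// Passing OptimizePhis lets the updater also clean up MemoryPhis that become
// trivial once the access is gone.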

void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references held by the accesses in BB.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}
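
// Illustrative usage sketch (hypothetical names, not part of this file): a
// pass that has collected a set of dead blocks updates MemorySSA before the
// blocks themselves are erased from the function:
//
//   SmallSetVector<BasicBlock *, 8> DeadSet(DeadBBs.begin(), DeadBBs.end());
//   MSSAU.removeBlocks(DeadSet);
//   // ... then delete the blocks from the IR.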

void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}

void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}
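
// Illustrative usage sketch (hypothetical names, not part of this file): when
// a transform replaces everything from instruction `I` to the end of its block
// with an `unreachable` terminator, the matching MemorySSA update is:
//
//   MSSAU.changeToUnreachable(I);
//
// This drops the accesses for the removed instructions and detaches the block
// from its successors' MemoryPhis.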

void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
                                                         const BasicBlock *To) {
  const BasicBlock *BB = BI->getParent();
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Succ : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Succ);
    if (Succ != To)
      if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
        MPhi->unorderedDeleteIncomingBlock(BB);
        UpdatedPHIs.push_back(MPhi);
      }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}
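
// Illustrative usage sketch (hypothetical names, not part of this file): if a
// pass folds the condition of a conditional branch `BI` so that only `TakenBB`
// remains reachable from it, the MemoryPhis of the dropped successors are
// updated with:
//
//   MSSAU.changeCondBranchToUnconditionalTo(BI, TakenBB);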

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}
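
// Illustrative usage sketch (hypothetical names, not part of this file): a
// pass that inserts a new store `NewSI` immediately before an existing store
// `SI` can create and wire up the corresponding MemoryDef like this:
//
//   MemoryUseOrDef *OldMA = MSSA->getMemoryAccess(SI);
//   MemoryUseOrDef *NewMA = MSSAU.createMemoryAccessBefore(
//       NewSI, OldMA->getDefiningAccess(), OldMA);
//   MSSAU.insertDef(cast<MemoryDef>(NewMA), /*RenameUses=*/true);
//
// Note that the createMemoryAccess* helpers only create the access and place
// it in the block's lists; retargeting downstream uses and MemoryPhis is left
// to the caller, e.g. via insertDef/insertUse.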

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}