//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sroa"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DIBuilder.h"
#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumNewAllocas,      "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted,        "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted,         "Number of instructions deleted");
STATISTIC(NumVectorized,      "Number of vectorized aggregates");

/// Hidden option to force the pass to not use DomTree and mem2reg, instead
/// forming SSA values through the SSAUpdater infrastructure.
static cl::opt<bool>
ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);

namespace {
/// \brief Alloca partitioning representation.
///
/// This class represents a partitioning of an alloca into slices, and
/// information about the nature of uses of each slice of the alloca. The goal
/// is that this information is sufficient to decide if and how to split the
/// alloca apart and replace slices with scalars. It is also intended that this
/// structure can capture the relevant information needed both to decide about
/// and to enact these transformations.
class AllocaPartitioning {
public:
  /// \brief A common base class for representing a half-open byte range.
  struct ByteRange {
    /// \brief The beginning offset of the range.
    uint64_t BeginOffset;

    /// \brief The ending offset, not included in the range.
    uint64_t EndOffset;

    ByteRange() : BeginOffset(), EndOffset() {}
    ByteRange(uint64_t BeginOffset, uint64_t EndOffset)
        : BeginOffset(BeginOffset), EndOffset(EndOffset) {}

    /// \brief Support for ordering ranges.
    ///
    /// This provides an ordering over ranges such that start offsets are
    /// always increasing, and within equal start offsets, the end offsets are
    /// decreasing. Thus the spanning range comes first in a cluster with the
    /// same start position.
    bool operator<(const ByteRange &RHS) const {
      if (BeginOffset < RHS.BeginOffset) return true;
      if (BeginOffset > RHS.BeginOffset) return false;
      if (EndOffset > RHS.EndOffset) return true;
      return false;
    }

    /// \brief Support comparison with a single offset to allow binary searches.
    friend bool operator<(const ByteRange &LHS, uint64_t RHSOffset) {
      return LHS.BeginOffset < RHSOffset;
    }

    friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                                const ByteRange &RHS) {
      return LHSOffset < RHS.BeginOffset;
    }

    bool operator==(const ByteRange &RHS) const {
      return BeginOffset == RHS.BeginOffset && EndOffset == RHS.EndOffset;
    }
    bool operator!=(const ByteRange &RHS) const { return !operator==(RHS); }
  };
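
  // Illustrative sketch (editorial note, not from the original source): with
  // ranges A = [0,16), B = [0,8), and C = [8,12), the ordering above sorts
  // them as A, B, C -- the spanning range sorts first among ranges sharing a
  // start offset, which is what the partition splitting/merging below relies
  // on. In code, assuming hypothetical locals:
  //
  //   ByteRange A(0, 16), B(0, 8), C(8, 12);
  //   assert(A < B && B < C); // mirrors operator< above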

  /// \brief A partition of an alloca.
  ///
  /// This structure represents a contiguous partition of the alloca. These are
  /// formed by examining the uses of the alloca. During formation, they may
  /// overlap but once an AllocaPartitioning is built, the Partitions within it
  /// are all disjoint.
  struct Partition : public ByteRange {
    /// \brief Whether this partition is splittable into smaller partitions.
    ///
    /// We flag partitions as splittable when they are formed entirely due to
    /// accesses by trivially splittable operations such as memset and memcpy.
    ///
    /// FIXME: At some point we should consider loads and stores of FCAs to be
    /// splittable and eagerly split them into scalar values.
    bool IsSplittable;

    Partition() : ByteRange(), IsSplittable() {}
    Partition(uint64_t BeginOffset, uint64_t EndOffset, bool IsSplittable)
        : ByteRange(BeginOffset, EndOffset), IsSplittable(IsSplittable) {}
  };

  /// \brief A particular use of a partition of the alloca.
  ///
  /// This structure is used to associate uses of a partition with it. They
  /// mark the range of bytes which are referenced by a particular instruction,
  /// and include a handle to the user itself and the pointer value in use.
  /// The bounds of these uses are determined by intersecting the bounds of the
  /// memory use itself with a particular partition. As a consequence there is
  /// intentional overlap between various uses of the same partition.
  struct PartitionUse : public ByteRange {
    /// \brief The use in question. Provides access to both user and used value.
    Use *U;

    PartitionUse() : ByteRange(), U() {}
    PartitionUse(uint64_t BeginOffset, uint64_t EndOffset, Use *U)
        : ByteRange(BeginOffset, EndOffset), U(U) {}
  };

  /// \brief Construct a partitioning of a particular alloca.
  ///
  /// Construction does most of the work for partitioning the alloca. This
  /// performs the necessary walks of users and builds a partitioning from it.
  AllocaPartitioning(const TargetData &TD, AllocaInst &AI);

  /// \brief Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the partitioning is never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// \brief Support for iterating over the partitions.
  /// @{
  typedef SmallVectorImpl<Partition>::iterator iterator;
  iterator begin() { return Partitions.begin(); }
  iterator end() { return Partitions.end(); }

  typedef SmallVectorImpl<Partition>::const_iterator const_iterator;
  const_iterator begin() const { return Partitions.begin(); }
  const_iterator end() const { return Partitions.end(); }
  /// @}

  /// \brief Support for iterating over and manipulating a particular
  /// partition's uses.
  ///
  /// The iteration support provided for uses is more limited, but also
  /// includes some manipulation routines to support rewriting the uses of
  /// partitions during SROA.
  /// @{
  typedef SmallVectorImpl<PartitionUse>::iterator use_iterator;
  use_iterator use_begin(unsigned Idx) { return Uses[Idx].begin(); }
  use_iterator use_begin(const_iterator I) { return Uses[I - begin()].begin(); }
  use_iterator use_end(unsigned Idx) { return Uses[Idx].end(); }
  use_iterator use_end(const_iterator I) { return Uses[I - begin()].end(); }

  typedef SmallVectorImpl<PartitionUse>::const_iterator const_use_iterator;
  const_use_iterator use_begin(unsigned Idx) const { return Uses[Idx].begin(); }
  const_use_iterator use_begin(const_iterator I) const {
    return Uses[I - begin()].begin();
  }
  const_use_iterator use_end(unsigned Idx) const { return Uses[Idx].end(); }
  const_use_iterator use_end(const_iterator I) const {
    return Uses[I - begin()].end();
  }

  unsigned use_size(unsigned Idx) const { return Uses[Idx].size(); }
  unsigned use_size(const_iterator I) const { return Uses[I - begin()].size(); }
  const PartitionUse &getUse(unsigned PIdx, unsigned UIdx) const {
    return Uses[PIdx][UIdx];
  }
  const PartitionUse &getUse(const_iterator I, unsigned UIdx) const {
    return Uses[I - begin()][UIdx];
  }

  void use_push_back(unsigned Idx, const PartitionUse &PU) {
    Uses[Idx].push_back(PU);
  }
  void use_push_back(const_iterator I, const PartitionUse &PU) {
    Uses[I - begin()].push_back(PU);
  }
  void use_erase(unsigned Idx, use_iterator UI) { Uses[Idx].erase(UI); }
  void use_erase(const_iterator I, use_iterator UI) {
    Uses[I - begin()].erase(UI);
  }
  /// @}
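
  // Illustrative sketch (editorial, not part of the original interface docs):
  // walking every use of every partition of a built partitioning 'P' follows
  // this shape, which is the pattern the rewriting code later in the pass
  // uses. 'visit' is a hypothetical callback.
  //
  //   for (AllocaPartitioning::iterator PI = P.begin(), PE = P.end();
  //        PI != PE; ++PI)
  //     for (AllocaPartitioning::use_iterator UI = P.use_begin(PI),
  //                                           UE = P.use_end(PI);
  //          UI != UE; ++UI)
  //       visit(*UI->U->getUser()); // each PartitionUse carries its Use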

  /// \brief Allow iterating the dead users for this alloca.
  ///
  /// These are instructions which will never actually use the alloca as they
  /// are outside the allocated range. They are safe to replace with undef and
  /// delete.
  /// @{
  typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
  dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
  dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
  /// @}

  /// \brief Allow iterating the dead expressions referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  /// @{
  typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
  dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
  dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
  /// @}

  /// \brief MemTransferInst auxiliary data.
  /// This struct provides some auxiliary data about memory transfer
  /// intrinsics such as memcpy and memmove. These intrinsics can use two
  /// different ranges within the same alloca, and provide other challenges to
  /// correctly represent. We stash extra data to help us untangle this
  /// after the partitioning is complete.
  struct MemTransferOffsets {
    uint64_t DestBegin, DestEnd;
    uint64_t SourceBegin, SourceEnd;
    bool IsSplittable;
  };
  MemTransferOffsets getMemTransferOffsets(MemTransferInst &II) const {
    return MemTransferInstData.lookup(&II);
  }

  /// \brief Map from a PHI or select operand back to a partition.
  ///
  /// When manipulating PHI nodes or selects, they can use more than one
  /// partition of an alloca. We store a special mapping to allow finding the
  /// partition referenced by each of these operands, if any.
  iterator findPartitionForPHIOrSelectOperand(Use *U) {
    SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
      = PHIOrSelectOpMap.find(U);
    if (MapIt == PHIOrSelectOpMap.end())
      return end();

    return begin() + MapIt->second.first;
  }

  /// \brief Map from a PHI or select operand back to the specific use of
  /// a partition.
  ///
  /// Similar to mapping these operands back to the partitions, this maps
  /// directly to the use structure of that partition.
  use_iterator findPartitionUseForPHIOrSelectOperand(Use *U) {
    SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
      = PHIOrSelectOpMap.find(U);
    assert(MapIt != PHIOrSelectOpMap.end());
    return Uses[MapIt->second.first].begin() + MapIt->second.second;
  }

  /// \brief Compute a common type among the uses of a particular partition.
  ///
  /// This routine walks all of the uses of a particular partition and tries
  /// to find a common type between them. Untyped operations such as memset and
  /// memcpy are ignored.
  Type *getCommonType(iterator I) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printUsers(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
  void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class PartitionBuilder;
  friend class AllocaPartitioning::PartitionBuilder;
  class UseBuilder;
  friend class AllocaPartitioning::UseBuilder;

#ifndef NDEBUG
  /// \brief Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// \brief The instruction responsible for this alloca having no partitioning.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to partition the alloca.
  /// This will be null if the alloca is partitioned successfully.
  Instruction *PointerEscapingInstr;

  /// \brief The partitions of the alloca.
  ///
  /// We store a vector of the partitions over the alloca here. This vector is
  /// sorted by increasing begin offset, and then by decreasing end offset. See
  /// the Partition inner class for more details. Initially (during
  /// construction) there are overlaps, but we form a disjoint sequence of
  /// partitions while finishing construction and a fully constructed object is
  /// expected to always have this as a disjoint space.
  SmallVector<Partition, 8> Partitions;

  /// \brief The uses of the partitions.
  ///
  /// This is essentially a mapping from each partition to a list of uses of
  /// that partition. The mapping is done with a Uses vector that has the exact
  /// same number of entries as the partition vector. Each entry is itself
  /// a vector of the uses.
  SmallVector<SmallVector<PartitionUse, 2>, 8> Uses;

  /// \brief Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by partition. This is because we expect
  /// a partitioned alloca to be completely rewritten or not rewritten at all.
  /// If rewritten, all these instructions can simply be removed and replaced
  /// with undef as they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// \brief Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere), and neither is the PHI,
  /// but we want to swap this particular input for undef to simplify the use
  /// lists of the alloca.
  SmallVector<Use *, 8> DeadOperands;

  /// \brief The underlying storage for auxiliary memcpy and memmove info.
  SmallDenseMap<MemTransferInst *, MemTransferOffsets, 4> MemTransferInstData;

  /// \brief A side data structure used when building up the partitions and
  /// uses.
  ///
  /// This mapping is only really used during the initial building of the
  /// partitioning so that we can retain information about PHI and select nodes
  /// processed.
  SmallDenseMap<Instruction *, std::pair<uint64_t, bool> > PHIOrSelectSizes;

  /// \brief Auxiliary information for particular PHI or select operands.
  SmallDenseMap<Use *, std::pair<unsigned, unsigned>, 4> PHIOrSelectOpMap;

  /// \brief A utility routine called from the constructor.
  ///
  /// This does what it says on the tin. It is the key to the alloca partition
  /// splitting and merging. After it is called we have the desired disjoint
  /// collection of partitions.
  void splitAndMergePartitions();
};
}
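
// Illustrative usage sketch (editorial note; 'TD' and 'AI' are assumed to be
// a TargetData reference and an alloca under analysis, and this is not the
// pass's actual driver code):
//
//   AllocaPartitioning P(TD, AI);
//   if (P.isEscaped())
//     return false; // a pointer to the alloca escaped; nothing to do
//   for (AllocaPartitioning::iterator PI = P.begin(); PI != P.end(); ++PI)
//     if (Type *CommonTy = P.getCommonType(PI))
//       (void)CommonTy; // a single type covers every use of this partition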

template <typename DerivedT, typename RetT>
class AllocaPartitioning::BuilderBase
    : public InstVisitor<DerivedT, RetT> {
public:
  BuilderBase(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
      : TD(TD),
        AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())),
        P(P) {
    enqueueUsers(AI, 0);
  }

protected:
  const TargetData &TD;
  const uint64_t AllocSize;
  AllocaPartitioning &P;

  SmallPtrSet<Use *, 8> VisitedUses;

  struct OffsetUse {
    Use *U;
    int64_t Offset;
  };
  SmallVector<OffsetUse, 8> Queue;

  // The active offset and use while visiting.
  Use *U;
  int64_t Offset;

  void enqueueUsers(Instruction &I, int64_t UserOffset) {
    for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
         UI != UE; ++UI) {
      if (VisitedUses.insert(&UI.getUse())) {
        OffsetUse OU = { &UI.getUse(), UserOffset };
        Queue.push_back(OU);
      }
    }
  }

  bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
    GEPOffset = Offset;
    for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
         GTI != GTE; ++GTI) {
      ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
      if (!OpC)
        return false;
      if (OpC->isZero())
        continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        unsigned ElementIdx = OpC->getZExtValue();
        const StructLayout *SL = TD.getStructLayout(STy);
        uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
        // Check that we can continue to model this GEP in a signed 64-bit
        // offset.
        if (ElementOffset > INT64_MAX ||
            (GEPOffset >= 0 &&
             ((uint64_t)GEPOffset + ElementOffset) > INT64_MAX)) {
          DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
                       << "what can be represented in an int64_t!\n"
                       << "  alloca: " << P.AI << "\n");
          return false;
        }
        if (GEPOffset < 0)
          GEPOffset = ElementOffset + (uint64_t)-GEPOffset;
        else
          GEPOffset += ElementOffset;
        continue;
      }

      APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
      Index *= APInt(Index.getBitWidth(),
                     TD.getTypeAllocSize(GTI.getIndexedType()));
      Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
                     /*isSigned*/true);
      // Check if the result can be stored in our int64_t offset.
      if (!Index.isSignedIntN(sizeof(GEPOffset) * 8)) {
        DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
                     << "what can be represented in an int64_t!\n"
                     << "  alloca: " << P.AI << "\n");
        return false;
      }

      GEPOffset = Index.getSExtValue();
    }
    return true;
  }
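
  // Worked example (editorial, illustrative only): for an alloca of type
  // { i32, [4 x i16] } and a GEP with constant indices 0, 1, 2, the leading
  // zero index contributes nothing, the struct index adds the field offset of
  // the array (4 bytes under the natural layout), and the array index adds
  // 2 * sizeof(i16) = 4 bytes, so GEPOffset ends up as Offset + 8. Any
  // non-constant index makes this return false and the GEP is treated as an
  // escape by the partition builder below.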

  Value *foldSelectInst(SelectInst &SI) {
    // If the condition being selected on is a constant or the same value is
    // being selected between, fold the select. Yes this does (rarely) happen
    // early on.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
      return SI.getOperand(1+CI->isZero());
    if (SI.getOperand(1) == SI.getOperand(2)) {
      assert(*U == SI.getOperand(1));
      return SI.getOperand(1);
    }
    return 0;
  }
};

/// \brief Builder for the alloca partitioning.
///
/// This class builds an alloca partitioning by recursively visiting the uses
/// of an alloca and splitting the partitions for each load and store at each
/// offset.
class AllocaPartitioning::PartitionBuilder
    : public BuilderBase<PartitionBuilder, bool> {
  friend class InstVisitor<PartitionBuilder, bool>;

  SmallDenseMap<Instruction *, unsigned> MemTransferPartitionMap;

public:
  PartitionBuilder(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
      : BuilderBase<PartitionBuilder, bool>(TD, AI, P) {}

  /// \brief Run the builder over the allocation.
  bool operator()() {
    // Note that we have to re-evaluate size on each trip through the loop as
    // the queue grows at the tail.
    for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) {
      U = Queue[Idx].U;
      Offset = Queue[Idx].Offset;
      if (!visit(cast<Instruction>(U->getUser())))
        return false;
    }
    return true;
  }

private:
  bool markAsEscaping(Instruction &I) {
    P.PointerEscapingInstr = &I;
    return false;
  }

  void insertUse(Instruction &I, int64_t Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or don't overlap the
    // allocation.
    if (Size == 0 ||
        (Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
        (Offset < 0 && (uint64_t)-Offset >= Size)) {
      DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
                   << " which starts past the end of the " << AllocSize
                   << " byte alloca:\n"
                   << "    alloca: " << P.AI << "\n"
                   << "       use: " << I << "\n");
      return;
    }

    // Clamp the start to the beginning of the allocation.
    if (Offset < 0) {
      DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
                   << " to start at the beginning of the alloca:\n"
                   << "    alloca: " << P.AI << "\n"
                   << "       use: " << I << "\n");
      Size -= (uint64_t)-Offset;
      Offset = 0;
    }

    uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
                   << " to remain within the " << AllocSize << " byte alloca:\n"
                   << "    alloca: " << P.AI << "\n"
                   << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

    // See if we can just add a user onto the last slot currently occupied.
    if (!P.Partitions.empty() &&
        P.Partitions.back().BeginOffset == BeginOffset &&
        P.Partitions.back().EndOffset == EndOffset) {
      P.Partitions.back().IsSplittable &= IsSplittable;
      return;
    }

    Partition New(BeginOffset, EndOffset, IsSplittable);
    P.Partitions.push_back(New);
  }

  bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
    uint64_t Size = TD.getTypeStoreSize(Ty);

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
        Size > (AllocSize - (uint64_t)Offset)) {
      DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte "
                   << (isa<LoadInst>(I) ? "load" : "store") << " @" << Offset
                   << " which extends past the end of the " << AllocSize
                   << " byte alloca:\n"
                   << "    alloca: " << P.AI << "\n"
                   << "       use: " << I << "\n");
      return true;
    }

    insertUse(I, Offset, Size);
    return true;
  }

  bool visitBitCastInst(BitCastInst &BC) {
    enqueueUsers(BC, Offset);
    return true;
  }

  bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    int64_t GEPOffset;
    if (!computeConstantGEPOffset(GEPI, GEPOffset))
      return markAsEscaping(GEPI);

    enqueueUsers(GEPI, GEPOffset);
    return true;
  }

  bool visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");
    return handleLoadOrStore(LI.getType(), LI, Offset);
  }

  bool visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return markAsEscaping(SI);

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    return handleLoadOrStore(ValOp->getType(), SI, Offset);
  }


  bool visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
    insertUse(II, Offset, Size, Length);
    return true;
  }

  bool visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
    if (!Size)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return true;

    MemTransferOffsets &Offsets = P.MemTransferInstData[&II];

    // Only intrinsics with a constant length can be split.
    Offsets.IsSplittable = Length;

    if (*U != II.getRawDest()) {
      assert(*U == II.getRawSource());
      Offsets.SourceBegin = Offset;
      Offsets.SourceEnd = Offset + Size;
    } else {
      Offsets.DestBegin = Offset;
      Offsets.DestEnd = Offset + Size;
    }

    insertUse(II, Offset, Size, Offsets.IsSplittable);
    unsigned NewIdx = P.Partitions.size() - 1;

    SmallDenseMap<Instruction *, unsigned>::const_iterator PMI;
    bool Inserted = false;
    llvm::tie(PMI, Inserted)
      = MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx));
    if (Offsets.IsSplittable &&
        (!Inserted || II.getRawSource() == II.getRawDest())) {
      // We've found a memory transfer intrinsic which refers to the alloca as
      // both a source and dest. This is detected either by direct equality of
      // the operand values, or when we visit the intrinsic twice due to two
      // different chains of values leading to it. We refuse to split these to
      // simplify splitting logic. If possible, SROA will still split them into
      // separate allocas and then re-analyze.
      Offsets.IsSplittable = false;
      P.Partitions[PMI->second].IsSplittable = false;
      P.Partitions[NewIdx].IsSplittable = false;
    }

    return true;
  }

  // Disable SROA for any intrinsics except for lifetime invariants.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  bool visitIntrinsicInst(IntrinsicInst &II) {
    if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
        II.getIntrinsicID() == Intrinsic::lifetime_end) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset, Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return true;
    }

    return markAsEscaping(II);
  }

  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
    // We consider any PHI or select that results in a direct load or store of
    // the same offset to be a viable use for partitioning purposes. These uses
    // are considered unsplittable and the size is the maximum loaded or stored
    // size.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
    Visited.insert(Root);
    Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
    // If there are no loads or stores, the access is dead. We mark that as
    // a size zero access.
    Size = 0;
    do {
      Instruction *I, *UsedI;
      llvm::tie(UsedI, I) = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        Size = std::max(Size, TD.getTypeStoreSize(LI->getType()));
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        Value *Op = SI->getOperand(0);
        if (Op == UsedI)
          return SI;
        Size = std::max(Size, TD.getTypeStoreSize(Op->getType()));
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        if (!GEP->hasAllZeroIndices())
          return GEP;
      } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
                 !isa<SelectInst>(I)) {
        return I;
      }

      for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
           ++UI)
        if (Visited.insert(cast<Instruction>(*UI)))
          Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
    } while (!Uses.empty());

    return 0;
  }

  bool visitPHINode(PHINode &PN) {
    // See if we have already computed info on this node.
    std::pair<uint64_t, bool> &PHIInfo = P.PHIOrSelectSizes[&PN];
    if (PHIInfo.first) {
      PHIInfo.second = true;
      insertUse(PN, Offset, PHIInfo.first);
      return true;
    }

    // Check for an unsafe use of the PHI node.
    if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first))
      return markAsEscaping(*EscapingI);

    insertUse(PN, Offset, PHIInfo.first);
    return true;
  }

  bool visitSelectInst(SelectInst &SI) {
    if (Value *Result = foldSelectInst(SI)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the select as if we had RAUW'ed it.
        enqueueUsers(SI, Offset);

      return true;
    }

    // See if we have already computed info on this node.
    std::pair<uint64_t, bool> &SelectInfo = P.PHIOrSelectSizes[&SI];
    if (SelectInfo.first) {
      SelectInfo.second = true;
      insertUse(SI, Offset, SelectInfo.first);
      return true;
    }

    // Check for an unsafe use of the select.
    if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first))
      return markAsEscaping(*EscapingI);

    insertUse(SI, Offset, SelectInfo.first);
    return true;
  }

  /// \brief Disable SROA entirely if there are unhandled users of the alloca.
  bool visitInstruction(Instruction &I) { return markAsEscaping(I); }
};


/// \brief Use adder for the alloca partitioning.
///
/// This class adds the uses of an alloca to all of the partitions which they
/// use. For splittable partitions, this can end up doing essentially a linear
/// walk of the partitions, but the number of steps remains bounded by the
/// total result instruction size:
/// - The number of partitions is a result of the number of unsplittable
///   instructions using the alloca.
/// - The number of users of each partition is at worst the total number of
///   splittable instructions using the alloca.
/// Thus we will produce N * M instructions in the end, where N is the number
/// of unsplittable uses and M is the number of splittable ones. This visitor
/// does the exact same number of updates to the partitioning.
///
/// In the more common case, this visitor will leverage the fact that the
/// partition space is pre-sorted, and do a logarithmic search for the
/// partition needed, making the total visit a classical ((N + M) * log(N))
/// complexity operation.
class AllocaPartitioning::UseBuilder : public BuilderBase<UseBuilder> {
  friend class InstVisitor<UseBuilder>;

  /// \brief Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  UseBuilder(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
      : BuilderBase<UseBuilder>(TD, AI, P) {}

  /// \brief Run the builder over the allocation.
  void operator()() {
    // Note that we have to re-evaluate size on each trip through the loop as
    // the queue grows at the tail.
    for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) {
      U = Queue[Idx].U;
      Offset = Queue[Idx].Offset;
      this->visit(cast<Instruction>(U->getUser()));
    }
  }

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I))
      P.DeadUsers.push_back(&I);
  }

  void insertUse(Instruction &User, int64_t Offset, uint64_t Size) {
    // If the use has a zero size or extends outside of the allocation, record
    // it as a dead use for elimination later.
    if (Size == 0 || (uint64_t)Offset >= AllocSize ||
        (Offset < 0 && (uint64_t)-Offset >= Size))
      return markAsDead(User);

    // Clamp the start to the beginning of the allocation.
    if (Offset < 0) {
      Size -= (uint64_t)-Offset;
      Offset = 0;
    }

    uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset)
      EndOffset = AllocSize;

    // NB: This only works if we have zero overlapping partitions.
    iterator B = std::lower_bound(P.begin(), P.end(), BeginOffset);
    if (B != P.begin() && llvm::prior(B)->EndOffset > BeginOffset)
      B = llvm::prior(B);
    for (iterator I = B, E = P.end(); I != E && I->BeginOffset < EndOffset;
         ++I) {
      PartitionUse NewPU(std::max(I->BeginOffset, BeginOffset),
                         std::min(I->EndOffset, EndOffset), U);
      P.use_push_back(I, NewPU);
      if (isa<PHINode>(U->getUser()) || isa<SelectInst>(U->getUser()))
        P.PHIOrSelectOpMap[U]
          = std::make_pair(I - P.begin(), P.Uses[I - P.begin()].size() - 1);
    }
  }

  void handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
    uint64_t Size = TD.getTypeStoreSize(Ty);

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse.
    if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
        Size > (AllocSize - (uint64_t)Offset))
      return markAsDead(I);

    insertUse(I, Offset, Size);
  }

  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    enqueueUsers(BC, Offset);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    int64_t GEPOffset;
    if (!computeConstantGEPOffset(GEPI, GEPOffset))
      llvm_unreachable("Unable to compute constant offset for use");

    enqueueUsers(GEPI, GEPOffset);
  }

  void visitLoadInst(LoadInst &LI) {
    handleLoadOrStore(LI.getType(), LI, Offset);
  }

  void visitStoreInst(StoreInst &SI) {
    handleLoadOrStore(SI.getOperand(0)->getType(), SI, Offset);
  }

  void visitMemSetInst(MemSetInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
    insertUse(II, Offset, Size);
  }

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
    insertUse(II, Offset, Size);
  }

  void visitIntrinsicInst(IntrinsicInst &II) {
    assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
           II.getIntrinsicID() == Intrinsic::lifetime_end);

    ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
    insertUse(II, Offset,
              std::min(AllocSize - Offset, Length->getLimitedValue()));
  }

  void insertPHIOrSelect(Instruction &User, uint64_t Offset) {
    uint64_t Size = P.PHIOrSelectSizes.lookup(&User).first;

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    if (Offset >= AllocSize) {
      P.DeadOperands.push_back(U);
      return;
    }

    insertUse(User, Offset, Size);
  }
  void visitPHINode(PHINode &PN) {
    if (PN.use_empty())
      return markAsDead(PN);

    insertPHIOrSelect(PN, Offset);
  }
  void visitSelectInst(SelectInst &SI) {
    if (SI.use_empty())
      return markAsDead(SI);

    if (Value *Result = foldSelectInst(SI)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the select as if we had RAUW'ed it.
        enqueueUsers(SI, Offset);
      else
        // Otherwise the operand to the select is dead, and we can replace it
        // with undef.
        P.DeadOperands.push_back(U);

      return;
    }

    insertPHIOrSelect(SI, Offset);
  }

  /// \brief Unreachable, we've already visited the alloca once.
  void visitInstruction(Instruction &I) {
    llvm_unreachable("Unhandled instruction in use builder.");
  }
};

void AllocaPartitioning::splitAndMergePartitions() {
  size_t NumDeadPartitions = 0;

  // Track the range of splittable partitions that we pass when accumulating
  // overlapping unsplittable partitions.
  uint64_t SplitEndOffset = 0ull;

  Partition New(0ull, 0ull, false);

  for (unsigned i = 0, j = i, e = Partitions.size(); i != e; i = j) {
    ++j;

    if (!Partitions[i].IsSplittable || New.BeginOffset == New.EndOffset) {
      assert(New.BeginOffset == New.EndOffset);
      New = Partitions[i];
    } else {
      assert(New.IsSplittable);
      New.EndOffset = std::max(New.EndOffset, Partitions[i].EndOffset);
    }
    assert(New.BeginOffset != New.EndOffset);

    // Scan the overlapping partitions.
    while (j != e && New.EndOffset > Partitions[j].BeginOffset) {
      // If the new partition we are forming is splittable, stop at the first
      // unsplittable partition.
      if (New.IsSplittable && !Partitions[j].IsSplittable)
        break;

      // Grow the new partition to include any equally splittable range. 'j' is
      // always equally splittable when New is splittable, but when New is not
      // splittable, we may subsume some (or part of some) splittable partition
      // without growing the new one.
      if (New.IsSplittable == Partitions[j].IsSplittable) {
        New.EndOffset = std::max(New.EndOffset, Partitions[j].EndOffset);
      } else {
        assert(!New.IsSplittable);
        assert(Partitions[j].IsSplittable);
        SplitEndOffset = std::max(SplitEndOffset, Partitions[j].EndOffset);
      }

      Partitions[j].BeginOffset = Partitions[j].EndOffset = UINT64_MAX;
      ++NumDeadPartitions;
      ++j;
    }

    // If the new partition is splittable, chop off the end as soon as the
    // unsplittable subsequent partition starts and ensure we eventually cover
    // the splittable area.
    if (j != e && New.IsSplittable) {
      SplitEndOffset = std::max(SplitEndOffset, New.EndOffset);
      New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset);
    }

    // Add the new partition if it differs from the original one and is
    // non-empty. We can end up with an empty partition here if it was
    // splittable but there is an unsplittable one that starts at the same
    // offset.
    if (New != Partitions[i]) {
      if (New.BeginOffset != New.EndOffset)
        Partitions.push_back(New);
      // Mark the old one for removal.
      Partitions[i].BeginOffset = Partitions[i].EndOffset = UINT64_MAX;
      ++NumDeadPartitions;
    }

    New.BeginOffset = New.EndOffset;
    if (!New.IsSplittable) {
      New.EndOffset = std::max(New.EndOffset, SplitEndOffset);
      if (j != e && !Partitions[j].IsSplittable)
        New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset);
      New.IsSplittable = true;
      // If there is a trailing splittable partition which won't be fused into
      // the next splittable partition go ahead and add it onto the partitions
      // list.
      if (New.BeginOffset < New.EndOffset &&
          (j == e || !Partitions[j].IsSplittable ||
           New.EndOffset < Partitions[j].BeginOffset)) {
        Partitions.push_back(New);
        New.BeginOffset = New.EndOffset = 0ull;
      }
    }
  }

  // Re-sort the partitions now that they have been split and merged into
  // a disjoint set of partitions. Also remove any of the dead partitions we've
  // replaced in the process.
  std::sort(Partitions.begin(), Partitions.end());
  if (NumDeadPartitions) {
    assert(Partitions.back().BeginOffset == UINT64_MAX);
    assert(Partitions.back().EndOffset == UINT64_MAX);
    assert((ptrdiff_t)NumDeadPartitions ==
           std::count(Partitions.begin(), Partitions.end(), Partitions.back()));
  }
  Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end());
}
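
// Illustrative example of the split/merge step above (editorial; offsets are
// made up): suppose use analysis produced a splittable partition [0,16) from
// a memset and an unsplittable partition [4,8) from an i32 load. The loop
// above keeps the unsplittable [4,8) intact and carves the splittable range
// around it, yielding the disjoint partitions [0,4), [4,8), and [8,16).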

AllocaPartitioning::AllocaPartitioning(const TargetData &TD, AllocaInst &AI)
    :
#ifndef NDEBUG
      AI(AI),
#endif
      PointerEscapingInstr(0) {
  PartitionBuilder PB(TD, AI, *this);
  if (!PB())
    return;

  if (Partitions.size() > 1) {
    // Sort the partitions. This arranges for the offsets to be in ascending
    // order, and the sizes to be in descending order.
    std::sort(Partitions.begin(), Partitions.end());

    // Intersect splittability for all partitions with equal offsets and sizes.
    // Then remove all but the first so that we have a sequence of non-equal but
    // potentially overlapping partitions.
    for (iterator I = Partitions.begin(), J = I, E = Partitions.end(); I != E;
         I = J) {
      ++J;
      while (J != E && *I == *J) {
        I->IsSplittable &= J->IsSplittable;
        ++J;
      }
    }
    Partitions.erase(std::unique(Partitions.begin(), Partitions.end()),
                     Partitions.end());

    // Split splittable and merge unsplittable partitions into a disjoint set
    // of partitions over the used space of the allocation.
    splitAndMergePartitions();
  }

  // Now build up the user lists for each of these disjoint partitions by
  // re-walking the recursive users of the alloca.
  Uses.resize(Partitions.size());
  UseBuilder UB(TD, AI, *this);
  UB();
}

Type *AllocaPartitioning::getCommonType(iterator I) const {
  Type *Ty = 0;
  for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) {
    if (isa<IntrinsicInst>(*UI->U->getUser()))
      continue;
    if (UI->BeginOffset != I->BeginOffset || UI->EndOffset != I->EndOffset)
      continue;

    Type *UserTy = 0;
    if (LoadInst *LI = dyn_cast<LoadInst>(UI->U->getUser())) {
      UserTy = LI->getType();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI->U->getUser())) {
      UserTy = SI->getValueOperand()->getType();
    }

    if (Ty && Ty != UserTy)
      return 0;

    Ty = UserTy;
  }
  return Ty;
}
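
// Illustrative sketch (editorial note): if a partition is loaded as i32 in
// one place and stored as i32 in another, getCommonType returns that i32
// type; mix in a float load over the same byte range and it returns null,
// leaving callers to choose a representation some other way.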

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaPartitioning::print(raw_ostream &OS, const_iterator I,
                               StringRef Indent) const {
  OS << Indent << "partition #" << (I - begin())
     << " [" << I->BeginOffset << "," << I->EndOffset << ")"
     << (I->IsSplittable ? " (splittable)" : "")
     << (Uses[I - begin()].empty() ? " (zero uses)" : "")
     << "\n";
}

void AllocaPartitioning::printUsers(raw_ostream &OS, const_iterator I,
                                    StringRef Indent) const {
  for (const_use_iterator UI = use_begin(I), UE = use_end(I);
       UI != UE; ++UI) {
    OS << Indent << "  [" << UI->BeginOffset << "," << UI->EndOffset << ") "
       << "used by: " << *UI->U->getUser() << "\n";
    if (MemTransferInst *II = dyn_cast<MemTransferInst>(UI->U->getUser())) {
      const MemTransferOffsets &MTO = MemTransferInstData.lookup(II);
      bool IsDest;
      if (!MTO.IsSplittable)
        IsDest = UI->BeginOffset == MTO.DestBegin;
      else
        IsDest = MTO.DestBegin != 0u;
      OS << Indent << "    (original " << (IsDest ? "dest" : "source") << ": "
         << "[" << (IsDest ? MTO.DestBegin : MTO.SourceBegin)
         << "," << (IsDest ? MTO.DestEnd : MTO.SourceEnd) << ")\n";
    }
  }
}

void AllocaPartitioning::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "No partitioning for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Partitioning of alloca: " << AI << "\n";
  unsigned Num = 0;
  for (const_iterator I = begin(), E = end(); I != E; ++I, ++Num) {
    print(OS, I);
    printUsers(OS, I);
  }
}

void AllocaPartitioning::dump(const_iterator I) const { print(dbgs(), I); }
void AllocaPartitioning::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)


namespace {
/// \brief Implementation of LoadAndStorePromoter for promoting allocas.
///
/// This subclass of LoadAndStorePromoter adds overrides to handle promoting
/// the loads and stores of an alloca instruction, as well as updating its
/// debug information. This is used when a domtree is unavailable and thus
/// mem2reg in its full form can't be used to handle promotion of allocas to
/// scalar values.
class AllocaPromoter : public LoadAndStorePromoter {
  AllocaInst &AI;
  DIBuilder &DIB;

  SmallVector<DbgDeclareInst *, 4> DDIs;
  SmallVector<DbgValueInst *, 4> DVIs;

public:
  AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
                 AllocaInst &AI, DIBuilder &DIB)
      : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}

  void run(const SmallVectorImpl<Instruction*> &Insts) {
    // Remember which alloca we're promoting (for isInstInList).
    if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
      for (Value::use_iterator UI = DebugNode->use_begin(),
                               UE = DebugNode->use_end();
           UI != UE; ++UI)
        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
          DDIs.push_back(DDI);
        else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
          DVIs.push_back(DVI);
    }

    LoadAndStorePromoter::run(Insts);
    AI.eraseFromParent();
    while (!DDIs.empty())
      DDIs.pop_back_val()->eraseFromParent();
    while (!DVIs.empty())
      DVIs.pop_back_val()->eraseFromParent();
  }

  virtual bool isInstInList(Instruction *I,
                            const SmallVectorImpl<Instruction*> &Insts) const {
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      return LI->getOperand(0) == &AI;
    return cast<StoreInst>(I)->getPointerOperand() == &AI;
  }

  virtual void updateDebugInfo(Instruction *Inst) const {
    for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(),
           E = DDIs.end(); I != E; ++I) {
      DbgDeclareInst *DDI = *I;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
      else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
    }
    for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
           E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
      Value *Arg = NULL;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        // If an argument is zero extended then use the argument directly. The
        // ZExt may be zapped by an optimization pass in the future.
        if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(ZExt->getOperand(0));
        if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(SExt->getOperand(0));
        if (!Arg)
          Arg = SI->getOperand(0);
      } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        Arg = LI->getOperand(0);
      } else {
        continue;
      }
      Instruction *DbgVal =
        DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
                                    Inst);
      DbgVal->setDebugLoc(DVI->getDebugLoc());
    }
  }
};
} // end anon namespace
1270
1271
1272namespace {
Chandler Carruth713aa942012-09-14 09:22:59 +00001273/// \brief An optimization pass providing Scalar Replacement of Aggregates.
1274///
1275/// This pass takes allocations which can be completely analyzed (that is, they
1276/// don't escape) and tries to turn them into scalar SSA values. There are
1277/// a few steps to this process.
1278///
1279/// 1) It takes allocations of aggregates and analyzes the ways in which they
1280/// are used to try to split them into smaller allocations, ideally of
1281/// a single scalar data type. It will split up memcpy and memset accesses
1282/// a single scalar data type. It will split up memcpy and memset accesses
1283/// 2) It will transform accesses into forms which are suitable for SSA value
1284/// promotion. This can be replacing a memset with a scalar store of an
1285/// integer value, or it can involve speculating operations on a PHI or
1286/// select to be a PHI or select of the results.
1287/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
1288/// onto insert and extract operations on a vector value, and convert them to
1289/// this form. By doing so, it will enable promotion of vector aggregates to
1290/// SSA vector values.
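/// As an illustrative sketch (added for exposition; the IR names and layout
/// here are assumed, not taken from the pass itself), an alloca used like:
///   %a = alloca { i32, i32 }
///   %f0 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 0
///   %f1 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 1
///   store i32 %x, i32* %f0
///   store i32 %y, i32* %f1
///   %v = load i32* %f0
/// would be split into two i32 allocas by step 1, after which promotion
/// replaces %v with %x directly and both allocas disappear.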
1291class SROA : public FunctionPass {
Chandler Carruth1c8db502012-09-15 11:43:14 +00001292 const bool RequiresDomTree;
1293
Chandler Carruth713aa942012-09-14 09:22:59 +00001294 LLVMContext *C;
1295 const TargetData *TD;
1296 DominatorTree *DT;
1297
1298 /// \brief Worklist of alloca instructions to simplify.
1299 ///
1300 /// Each alloca in the function is added to this. Each new alloca formed gets
1301 /// added to it as well to recursively simplify unless that alloca can be
1302 /// directly promoted. Finally, each time we rewrite a use of an alloca other
1303 /// the one being actively rewritten, we add it back onto the list if not
1304 /// already present to ensure it is re-visited.
1305 SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > Worklist;
1306
1307 /// \brief A collection of instructions to delete.
1308 /// We try to batch deletions to simplify code and make things a bit more
1309 /// efficient.
1310 SmallVector<Instruction *, 8> DeadInsts;
1311
1312 /// \brief A set to prevent repeatedly marking an instruction split into many
1313 /// uses as dead. Only used to guard insertion into DeadInsts.
1314 SmallPtrSet<Instruction *, 4> DeadSplitInsts;
1315
Chandler Carruth713aa942012-09-14 09:22:59 +00001316 /// \brief A collection of alloca instructions we can directly promote.
1317 std::vector<AllocaInst *> PromotableAllocas;
1318
1319public:
Chandler Carruth1c8db502012-09-15 11:43:14 +00001320 SROA(bool RequiresDomTree = true)
1321 : FunctionPass(ID), RequiresDomTree(RequiresDomTree),
1322 C(0), TD(0), DT(0) {
Chandler Carruth713aa942012-09-14 09:22:59 +00001323 initializeSROAPass(*PassRegistry::getPassRegistry());
1324 }
1325 bool runOnFunction(Function &F);
1326 void getAnalysisUsage(AnalysisUsage &AU) const;
1327
1328 const char *getPassName() const { return "SROA"; }
1329 static char ID;
1330
1331private:
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00001332 friend class PHIOrSelectSpeculator;
Chandler Carruth713aa942012-09-14 09:22:59 +00001333 friend class AllocaPartitionRewriter;
1334 friend class AllocaPartitionVectorRewriter;
1335
1336 bool rewriteAllocaPartition(AllocaInst &AI,
1337 AllocaPartitioning &P,
1338 AllocaPartitioning::iterator PI);
1339 bool splitAlloca(AllocaInst &AI, AllocaPartitioning &P);
1340 bool runOnAlloca(AllocaInst &AI);
Chandler Carruth8615cd22012-09-14 10:26:38 +00001341 void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
Chandler Carruth1c8db502012-09-15 11:43:14 +00001342 bool promoteAllocas(Function &F);
Chandler Carruth713aa942012-09-14 09:22:59 +00001343};
1344}
1345
1346char SROA::ID = 0;
1347
Chandler Carruth1c8db502012-09-15 11:43:14 +00001348FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
1349 return new SROA(RequiresDomTree);
Chandler Carruth713aa942012-09-14 09:22:59 +00001350}
1351
1352INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
1353 false, false)
1354INITIALIZE_PASS_DEPENDENCY(DominatorTree)
1355INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
1356 false, false)
1357
1358/// \brief Accumulate the constant offsets in a GEP into a single APInt offset.
1359///
1360/// If the provided GEP is all-constant, the total byte offset formed by the
1361/// GEP is computed and Offset is set to it. If the GEP has any non-constant
1362/// operands, the function returns false and the value of Offset is unmodified.
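/// For illustration only (example values assumed, not from the original
/// comment): given the usual 64-bit layout where i64 is 8-byte aligned,
///   %p = getelementptr inbounds { i32, [2 x i64] }* %a, i32 0, i32 1, i32 1
/// accumulates 8 bytes to reach field 1 (after padding) plus 8 bytes for
/// array element 1, so Offset is set to 16 and the function returns true.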
1363static bool accumulateGEPOffsets(const TargetData &TD, GEPOperator &GEP,
1364 APInt &Offset) {
1365 APInt GEPOffset(Offset.getBitWidth(), 0);
1366 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1367 GTI != GTE; ++GTI) {
1368 ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
1369 if (!OpC)
1370 return false;
1371 if (OpC->isZero()) continue;
1372
1373 // Handle a struct index, which adds its field offset to the pointer.
1374 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1375 unsigned ElementIdx = OpC->getZExtValue();
1376 const StructLayout *SL = TD.getStructLayout(STy);
1377 GEPOffset += APInt(Offset.getBitWidth(),
1378 SL->getElementOffset(ElementIdx));
1379 continue;
1380 }
1381
1382 APInt TypeSize(Offset.getBitWidth(),
1383 TD.getTypeAllocSize(GTI.getIndexedType()));
1384 if (VectorType *VTy = dyn_cast<VectorType>(*GTI)) {
1385 assert((VTy->getScalarSizeInBits() % 8) == 0 &&
1386 "vector element size is not a multiple of 8, cannot GEP over it");
1387 TypeSize = VTy->getScalarSizeInBits() / 8;
1388 }
1389
1390 GEPOffset += OpC->getValue().sextOrTrunc(Offset.getBitWidth()) * TypeSize;
1391 }
1392 Offset = GEPOffset;
1393 return true;
1394}
1395
1396/// \brief Build a GEP out of a base pointer and indices.
1397///
1398/// This will return the BasePtr if that is valid, or build a new GEP
1399/// instruction using the IRBuilder if GEP-ing is needed.
1400static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr,
1401 SmallVectorImpl<Value *> &Indices,
1402 const Twine &Prefix) {
1403 if (Indices.empty())
1404 return BasePtr;
1405
1406 // A single zero index is a no-op, so check for this and avoid building a GEP
1407 // in that case.
1408 if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
1409 return BasePtr;
1410
1411 return IRB.CreateInBoundsGEP(BasePtr, Indices, Prefix + ".idx");
1412}
1413
1414/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
1415/// TargetTy without changing the offset of the pointer.
1416///
1417/// This routine assumes we've already established a properly offset GEP with
1418/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
1419/// zero-indices down through type layers until we find one the same as
1420/// TargetTy. If we can't find one with the same type, we at least try to use
1421/// one with the same size. If none of that works, we just produce the GEP as
1422/// indicated by Indices to have the correct offset.
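/// A small assumed example: with Ty == { [4 x i32], float } and
/// TargetTy == i32, the loop below appends two zero indices so the resulting
/// GEP points at the first i32 element without changing the byte offset.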
1423static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const TargetData &TD,
1424 Value *BasePtr, Type *Ty, Type *TargetTy,
1425 SmallVectorImpl<Value *> &Indices,
1426 const Twine &Prefix) {
1427 if (Ty == TargetTy)
1428 return buildGEP(IRB, BasePtr, Indices, Prefix);
1429
1430 // See if we can descend into a struct and locate a field with the correct
1431 // type.
1432 unsigned NumLayers = 0;
1433 Type *ElementTy = Ty;
1434 do {
1435 if (ElementTy->isPointerTy())
1436 break;
1437 if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
1438 ElementTy = SeqTy->getElementType();
1439 Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(), 0)));
1440 } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
1441 ElementTy = *STy->element_begin();
1442 Indices.push_back(IRB.getInt32(0));
1443 } else {
1444 break;
1445 }
1446 ++NumLayers;
1447 } while (ElementTy != TargetTy);
1448 if (ElementTy != TargetTy)
1449 Indices.erase(Indices.end() - NumLayers, Indices.end());
1450
1451 return buildGEP(IRB, BasePtr, Indices, Prefix);
1452}
1453
1454/// \brief Recursively compute indices for a natural GEP.
1455///
1456/// This is the recursive step for getNaturalGEPWithOffset that walks down the
1457/// element types adding appropriate indices for the GEP.
1458static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const TargetData &TD,
1459 Value *Ptr, Type *Ty, APInt &Offset,
1460 Type *TargetTy,
1461 SmallVectorImpl<Value *> &Indices,
1462 const Twine &Prefix) {
1463 if (Offset == 0)
1464 return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices, Prefix);
1465
1466 // We can't recurse through pointer types.
1467 if (Ty->isPointerTy())
1468 return 0;
1469
Chandler Carruth8ed1ed82012-09-14 10:30:40 +00001470 // We try to analyze GEPs over vectors here, but note that these GEPs are
1471 // extremely poorly defined currently. The long-term goal is to remove GEPing
1472 // over a vector from the IR completely.
Chandler Carruth713aa942012-09-14 09:22:59 +00001473 if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
1474 unsigned ElementSizeInBits = VecTy->getScalarSizeInBits();
1475 if (ElementSizeInBits % 8)
Chandler Carruth8ed1ed82012-09-14 10:30:40 +00001476 return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
Chandler Carruth713aa942012-09-14 09:22:59 +00001477 APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
1478 APInt NumSkippedElements = Offset.udiv(ElementSize);
1479 if (NumSkippedElements.ugt(VecTy->getNumElements()))
1480 return 0;
1481 Offset -= NumSkippedElements * ElementSize;
1482 Indices.push_back(IRB.getInt(NumSkippedElements));
1483 return getNaturalGEPRecursively(IRB, TD, Ptr, VecTy->getElementType(),
1484 Offset, TargetTy, Indices, Prefix);
1485 }
1486
1487 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
1488 Type *ElementTy = ArrTy->getElementType();
1489 APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
1490 APInt NumSkippedElements = Offset.udiv(ElementSize);
1491 if (NumSkippedElements.ugt(ArrTy->getNumElements()))
1492 return 0;
1493
1494 Offset -= NumSkippedElements * ElementSize;
1495 Indices.push_back(IRB.getInt(NumSkippedElements));
1496 return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
1497 Indices, Prefix);
1498 }
1499
1500 StructType *STy = dyn_cast<StructType>(Ty);
1501 if (!STy)
1502 return 0;
1503
1504 const StructLayout *SL = TD.getStructLayout(STy);
1505 uint64_t StructOffset = Offset.getZExtValue();
Chandler Carruthad41dcf2012-09-14 10:30:42 +00001506 if (StructOffset >= SL->getSizeInBytes())
Chandler Carruth713aa942012-09-14 09:22:59 +00001507 return 0;
1508 unsigned Index = SL->getElementContainingOffset(StructOffset);
1509 Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
1510 Type *ElementTy = STy->getElementType(Index);
1511 if (Offset.uge(TD.getTypeAllocSize(ElementTy)))
1512 return 0; // The offset points into alignment padding.
1513
1514 Indices.push_back(IRB.getInt32(Index));
1515 return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
1516 Indices, Prefix);
1517}
1518
1519/// \brief Get a natural GEP from a base pointer to a particular offset and
1520/// resulting in a particular type.
1521///
1522/// The goal is to produce a "natural" looking GEP that works with the existing
1523/// composite types to arrive at the appropriate offset and element type for
1524/// a pointer. TargetTy is the element type the returned GEP should point-to if
1525/// possible. We recurse by decreasing Offset, adding the appropriate index to
1526/// Indices, and setting Ty to the result subtype.
1527///
Chandler Carruth7f5bede2012-09-14 10:18:49 +00001528/// If no natural GEP can be constructed, this function returns null.
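/// Rough example (offsets and types assumed for illustration): for a Ptr of
/// type [10 x i32]* with Offset == 12 and TargetTy == i32, this emits the
/// index sequence 0, 3, i.e. a GEP to element 3 at byte offset 12.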
Chandler Carruth713aa942012-09-14 09:22:59 +00001529static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const TargetData &TD,
1530 Value *Ptr, APInt Offset, Type *TargetTy,
1531 SmallVectorImpl<Value *> &Indices,
1532 const Twine &Prefix) {
1533 PointerType *Ty = cast<PointerType>(Ptr->getType());
1534
1535 // A GEP through an i8* toward an i8 target adds nothing over the caller's
1536 // raw byte-offset fallback, so don't treat it as a natural GEP.
1537 if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
1538 return 0;
1539
1540 Type *ElementTy = Ty->getElementType();
Chandler Carruth38f35fd2012-09-18 22:37:19 +00001541 if (!ElementTy->isSized())
1542 return 0; // We can't GEP through an unsized element.
Chandler Carruth713aa942012-09-14 09:22:59 +00001543 APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
1544 if (ElementSize == 0)
1545 return 0; // Zero-length arrays can't help us build a natural GEP.
1546 APInt NumSkippedElements = Offset.udiv(ElementSize);
1547
1548 Offset -= NumSkippedElements * ElementSize;
1549 Indices.push_back(IRB.getInt(NumSkippedElements));
1550 return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
1551 Indices, Prefix);
1552}
1553
1554/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
1555/// resulting pointer has PointerTy.
1556///
1557/// This tries very hard to compute a "natural" GEP which arrives at the offset
1558/// and produces the pointer type desired. Where it cannot, it will try to use
1559/// the natural GEP to arrive at the offset and bitcast to the type. Where that
1560/// fails, it will try to use an existing i8* and GEP to the byte offset and
1561/// bitcast to the type.
1562///
1563/// The strategy for finding the more natural GEPs is to peel off layers of the
1564/// pointer, walking back through bit casts and GEPs, searching for a base
1565/// pointer from which we can compute a natural GEP with the desired
1566/// properties. The algorithm tries to fold as many constant indices into
1567/// a single GEP as possible, thus making each GEP more independent of the
1568/// surrounding code.
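/// A hedged sketch of the fallback path (names here are illustrative, built
/// from the Prefix argument): when no suitably typed natural GEP is found,
/// the pointer is cast to i8*, offset by the raw byte count, and cast back:
///   %p.raw_cast = bitcast %T* %ptr to i8*
///   %p.raw_idx = getelementptr inbounds i8* %p.raw_cast, i64 16
///   %p.cast = bitcast i8* %p.raw_idx to i32*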
1569static Value *getAdjustedPtr(IRBuilder<> &IRB, const TargetData &TD,
1570 Value *Ptr, APInt Offset, Type *PointerTy,
1571 const Twine &Prefix) {
1572 // Even though we don't look through PHI nodes, we could be called on an
1573 // instruction in an unreachable block, which may be on a cycle.
1574 SmallPtrSet<Value *, 4> Visited;
1575 Visited.insert(Ptr);
1576 SmallVector<Value *, 4> Indices;
1577
1578 // We may end up computing an offset pointer that has the wrong type. If we
1579 // never are able to compute one directly that has the correct type, we'll
1580 // fall back to it, so keep it around here.
1581 Value *OffsetPtr = 0;
1582
1583 // Remember any i8 pointer we come across to re-use if we need to do a raw
1584 // byte offset.
1585 Value *Int8Ptr = 0;
1586 APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1587
1588 Type *TargetTy = PointerTy->getPointerElementType();
1589
1590 do {
1591 // First fold any existing GEPs into the offset.
1592 while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1593 APInt GEPOffset(Offset.getBitWidth(), 0);
1594 if (!accumulateGEPOffsets(TD, *GEP, GEPOffset))
1595 break;
1596 Offset += GEPOffset;
1597 Ptr = GEP->getPointerOperand();
1598 if (!Visited.insert(Ptr))
1599 break;
1600 }
1601
1602 // See if we can perform a natural GEP here.
1603 Indices.clear();
1604 if (Value *P = getNaturalGEPWithOffset(IRB, TD, Ptr, Offset, TargetTy,
1605 Indices, Prefix)) {
1606 if (P->getType() == PointerTy) {
1607 // Zap any offset pointer that we ended up computing in previous rounds.
1608 if (OffsetPtr && OffsetPtr->use_empty())
1609 if (Instruction *I = dyn_cast<Instruction>(OffsetPtr))
1610 I->eraseFromParent();
1611 return P;
1612 }
1613 if (!OffsetPtr) {
1614 OffsetPtr = P;
1615 }
1616 }
1617
1618 // Stash this pointer if we've found an i8*.
1619 if (Ptr->getType()->isIntegerTy(8)) {
1620 Int8Ptr = Ptr;
1621 Int8PtrOffset = Offset;
1622 }
1623
1624 // Peel off a layer of the pointer and update the offset appropriately.
1625 if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1626 Ptr = cast<Operator>(Ptr)->getOperand(0);
1627 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1628 if (GA->mayBeOverridden())
1629 break;
1630 Ptr = GA->getAliasee();
1631 } else {
1632 break;
1633 }
1634 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1635 } while (Visited.insert(Ptr));
1636
1637 if (!OffsetPtr) {
1638 if (!Int8Ptr) {
1639 Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
1640 Prefix + ".raw_cast");
1641 Int8PtrOffset = Offset;
1642 }
1643
1644 OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
1645 IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
1646 Prefix + ".raw_idx");
1647 }
1648 Ptr = OffsetPtr;
1649
1650 // On the off chance we were targeting i8*, guard the bitcast here.
1651 if (Ptr->getType() != PointerTy)
1652 Ptr = IRB.CreateBitCast(Ptr, PointerTy, Prefix + ".cast");
1653
1654 return Ptr;
1655}
1656
1657/// \brief Test whether the given alloca partition can be promoted to a vector.
1658///
1659/// This is a quick test to check whether we can rewrite a particular alloca
1660/// partition (and its newly formed alloca) into a vector alloca with only
1661/// whole-vector loads and stores such that it could be promoted to a vector
1662/// SSA value. We only can ensure this for a limited set of operations, and we
1663/// don't want to do the rewrites unless we are confident that the result will
1664/// be promotable, so we have an early test here.
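/// Sketch of a qualifying partition (assumed example, not from the source
/// comment): a <4 x float> alloca whose uses are whole-vector loads and
/// stores plus loads and stores of single float elements at element
/// boundaries passes this test; an access that straddles two elements or
/// covers only part of the vector fails it.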
1665static bool isVectorPromotionViable(const TargetData &TD,
1666 Type *AllocaTy,
1667 AllocaPartitioning &P,
1668 uint64_t PartitionBeginOffset,
1669 uint64_t PartitionEndOffset,
1670 AllocaPartitioning::const_use_iterator I,
1671 AllocaPartitioning::const_use_iterator E) {
1672 VectorType *Ty = dyn_cast<VectorType>(AllocaTy);
1673 if (!Ty)
1674 return false;
1675
1676 uint64_t VecSize = TD.getTypeSizeInBits(Ty);
1677 uint64_t ElementSize = Ty->getScalarSizeInBits();
1678
1679 // While the definition of LLVM vectors is bitpacked, we don't support element
1680 // sizes that aren't byte sized.
1681 if (ElementSize % 8)
1682 return false;
1683 assert((VecSize % 8) == 0 && "vector size not a multiple of element size?");
1684 VecSize /= 8;
1685 ElementSize /= 8;
1686
1687 for (; I != E; ++I) {
1688 uint64_t BeginOffset = I->BeginOffset - PartitionBeginOffset;
1689 uint64_t BeginIndex = BeginOffset / ElementSize;
1690 if (BeginIndex * ElementSize != BeginOffset ||
1691 BeginIndex >= Ty->getNumElements())
1692 return false;
1693 uint64_t EndOffset = I->EndOffset - PartitionBeginOffset;
1694 uint64_t EndIndex = EndOffset / ElementSize;
1695 if (EndIndex * ElementSize != EndOffset ||
1696 EndIndex > Ty->getNumElements())
1697 return false;
1698
1699 // FIXME: We should build shuffle vector instructions to handle
1700 // non-element-sized accesses.
1701 if ((EndOffset - BeginOffset) != ElementSize &&
1702 (EndOffset - BeginOffset) != VecSize)
1703 return false;
1704
Chandler Carruth77c12702012-10-01 01:49:22 +00001705 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I->U->getUser())) {
Chandler Carruth713aa942012-09-14 09:22:59 +00001706 if (MI->isVolatile())
1707 return false;
Chandler Carruth77c12702012-10-01 01:49:22 +00001708 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I->U->getUser())) {
Chandler Carruth713aa942012-09-14 09:22:59 +00001709 const AllocaPartitioning::MemTransferOffsets &MTO
1710 = P.getMemTransferOffsets(*MTI);
1711 if (!MTO.IsSplittable)
1712 return false;
1713 }
Chandler Carruth77c12702012-10-01 01:49:22 +00001714 } else if (I->U->get()->getType()->getPointerElementType()->isStructTy()) {
Chandler Carruth713aa942012-09-14 09:22:59 +00001715 // Disable vector promotion when there are loads or stores of an FCA.
1716 return false;
Chandler Carruth77c12702012-10-01 01:49:22 +00001717 } else if (!isa<LoadInst>(I->U->getUser()) &&
1718 !isa<StoreInst>(I->U->getUser())) {
Chandler Carruth713aa942012-09-14 09:22:59 +00001719 return false;
1720 }
1721 }
1722 return true;
1723}
1724
Chandler Carruthbc4021f2012-09-24 00:34:20 +00001725/// \brief Test whether the given alloca partition can be promoted to an int.
1726///
1727/// This is a quick test to check whether we can rewrite a particular alloca
1728/// partition (and its newly formed alloca) into an integer alloca suitable for
1729/// promotion to an SSA value. We only can ensure this for a limited set of
1730/// operations, and we don't want to do the rewrites unless we are confident
1731/// that the result will be promotable, so we have an early test here.
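/// Assumed example for illustration: an i64 alloca with at least one whole
/// i64 load or store, whose remaining uses are narrower integer loads and
/// stores or splittable memcpy/memset calls, passes this test; an alloca
/// touched only by an i16 store fails it because no covering access exists.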
1732static bool isIntegerPromotionViable(const TargetData &TD,
1733 Type *AllocaTy,
1734 AllocaPartitioning &P,
1735 AllocaPartitioning::const_use_iterator I,
1736 AllocaPartitioning::const_use_iterator E) {
1737 IntegerType *Ty = dyn_cast<IntegerType>(AllocaTy);
1738 if (!Ty)
1739 return false;
1740
1741 // Check the uses to ensure they are (likely) promotable integer uses.
1742 // Also ensure that the alloca has a covering load or store. We don't want to
1743 // promote because of some other unsplittable entry (which we may make
1744 // splittable later) and lose the ability to promote each element access.
1745 bool WholeAllocaOp = false;
1746 for (; I != E; ++I) {
Chandler Carruth77c12702012-10-01 01:49:22 +00001747 if (LoadInst *LI = dyn_cast<LoadInst>(I->U->getUser())) {
Chandler Carruthbc4021f2012-09-24 00:34:20 +00001748 if (LI->isVolatile() || !LI->getType()->isIntegerTy())
1749 return false;
1750 if (LI->getType() == Ty)
1751 WholeAllocaOp = true;
Chandler Carruth77c12702012-10-01 01:49:22 +00001752 } else if (StoreInst *SI = dyn_cast<StoreInst>(I->U->getUser())) {
Chandler Carruthbc4021f2012-09-24 00:34:20 +00001753 if (SI->isVolatile() || !SI->getValueOperand()->getType()->isIntegerTy())
1754 return false;
1755 if (SI->getValueOperand()->getType() == Ty)
1756 WholeAllocaOp = true;
Chandler Carruth77c12702012-10-01 01:49:22 +00001757 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I->U->getUser())) {
Chandler Carruthbc4021f2012-09-24 00:34:20 +00001758 if (MI->isVolatile())
1759 return false;
Chandler Carruth77c12702012-10-01 01:49:22 +00001760 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I->U->getUser())) {
Chandler Carruthbc4021f2012-09-24 00:34:20 +00001761 const AllocaPartitioning::MemTransferOffsets &MTO
1762 = P.getMemTransferOffsets(*MTI);
1763 if (!MTO.IsSplittable)
1764 return false;
1765 }
1766 } else {
1767 return false;
1768 }
1769 }
1770 return WholeAllocaOp;
1771}
1772
Chandler Carruth713aa942012-09-14 09:22:59 +00001773namespace {
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00001774/// \brief Visitor to speculate PHIs and Selects where possible.
1775class PHIOrSelectSpeculator : public InstVisitor<PHIOrSelectSpeculator> {
1776 // Befriend the base class so it can delegate to private visit methods.
1777 friend class llvm::InstVisitor<PHIOrSelectSpeculator>;
1778
1779 const TargetData &TD;
1780 AllocaPartitioning &P;
1781 SROA &Pass;
1782
1783public:
1784 PHIOrSelectSpeculator(const TargetData &TD, AllocaPartitioning &P, SROA &Pass)
1785 : TD(TD), P(P), Pass(Pass) {}
1786
Chandler Carrutha346f462012-10-02 17:49:47 +00001787 /// \brief Visit the users of an alloca partition and rewrite them.
1788 void visitUsers(AllocaPartitioning::const_iterator PI) {
1789 // Note that we need to use an index here as the underlying vector of uses
1790 // may be grown during speculation. However, we never need to re-visit the
1791 // new uses, and so we can use the initial size bound.
1792 for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx)
1793 visit(cast<Instruction>(P.getUse(PI, Idx).U->getUser()));
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00001794 }
1795
1796private:
1797 // By default, skip this instruction.
1798 void visitInstruction(Instruction &I) {}
1799
1800 /// PHI instructions that use an alloca and are subsequently loaded can be
1801 /// rewritten to load both input pointers in the pred blocks and then PHI the
1802 /// results, allowing the load of the alloca to be promoted.
1803 /// From this:
1804 /// %P2 = phi [i32* %Alloca, i32* %Other]
1805 /// %V = load i32* %P2
1806 /// to:
1807 /// %V1 = load i32* %Alloca -> will be mem2reg'd
1808 /// ...
1809 /// %V2 = load i32* %Other
1810 /// ...
1811 /// %V = phi [i32 %V1, i32 %V2]
1812 ///
Chandler Carruthc7a4ca72012-10-01 12:24:42 +00001813 /// We can do this to a PHI if its only uses are loads and if the incoming
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00001814 /// pointers can be loaded unconditionally in the predecessor blocks.
1815 ///
1816 /// FIXME: This should be hoisted into a generic utility, likely in
1817 /// Transforms/Util/Local.h
1818 bool isSafePHIToSpeculate(PHINode &PN, SmallVectorImpl<LoadInst *> &Loads) {
1819 // For now, we can only do this promotion if the load is in the same block
1820 // as the PHI, and if there are no stores between the phi and load.
1821 // TODO: Allow recursive phi users.
1822 // TODO: Allow stores.
1823 BasicBlock *BB = PN.getParent();
1824 unsigned MaxAlign = 0;
1825 for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end();
1826 UI != UE; ++UI) {
1827 LoadInst *LI = dyn_cast<LoadInst>(*UI);
1828 if (LI == 0 || !LI->isSimple()) return false;
1829
1830 // For now we only allow loads in the same block as the PHI. This is
1831 // a common case that happens when instcombine merges two loads through
1832 // a PHI.
1833 if (LI->getParent() != BB) return false;
1834
1835 // Ensure that there are no instructions between the PHI and the load that
1836 // could store.
1837 for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
1838 if (BBI->mayWriteToMemory())
1839 return false;
1840
1841 MaxAlign = std::max(MaxAlign, LI->getAlignment());
1842 Loads.push_back(LI);
1843 }
1844
1845 // We can only transform this if it is safe to push the loads into the
1846 // predecessor blocks. The only thing to watch out for is that we can't put
1847 // a possibly trapping load in the predecessor if it is a critical edge.
1848 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num;
1849 ++Idx) {
1850 TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
1851 Value *InVal = PN.getIncomingValue(Idx);
1852
1853 // If the value is produced by the terminator of the predecessor (an
1854 // invoke) or it has side-effects, there is no valid place to put a load
1855 // in the predecessor.
1856 if (TI == InVal || TI->mayHaveSideEffects())
1857 return false;
1858
1859 // If the predecessor has a single successor, then the edge isn't
1860 // critical.
1861 if (TI->getNumSuccessors() == 1)
1862 continue;
1863
1864 // If this pointer is always safe to load, or if we can prove that there
1865 // is already a load in the block, then we can move the load to the pred
1866 // block.
1867 if (InVal->isDereferenceablePointer() ||
1868 isSafeToLoadUnconditionally(InVal, TI, MaxAlign, &TD))
1869 continue;
1870
1871 return false;
1872 }
1873
1874 return true;
1875 }
1876
1877 void visitPHINode(PHINode &PN) {
1878 DEBUG(dbgs() << " original: " << PN << "\n");
1879
1880 SmallVector<LoadInst *, 4> Loads;
1881 if (!isSafePHIToSpeculate(PN, Loads))
1882 return;
1883
1884 assert(!Loads.empty());
1885
1886 Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
1887 IRBuilder<> PHIBuilder(&PN);
1888 PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
1889 PN.getName() + ".sroa.speculated");
1890
1891 // Get the TBAA tag and alignment to use from one of the loads. It doesn't
1892 // matter which one we get and if any differ, it doesn't matter.
1893 LoadInst *SomeLoad = cast<LoadInst>(Loads.back());
1894 MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
1895 unsigned Align = SomeLoad->getAlignment();
1896
1897 // Rewrite all loads of the PN to use the new PHI.
1898 do {
1899 LoadInst *LI = Loads.pop_back_val();
1900 LI->replaceAllUsesWith(NewPN);
1901 Pass.DeadInsts.push_back(LI);
1902 } while (!Loads.empty());
1903
1904 // Inject loads into all of the pred blocks.
1905 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1906 BasicBlock *Pred = PN.getIncomingBlock(Idx);
1907 TerminatorInst *TI = Pred->getTerminator();
1908 Use *InUse = &PN.getOperandUse(PN.getOperandNumForIncomingValue(Idx));
1909 Value *InVal = PN.getIncomingValue(Idx);
1910 IRBuilder<> PredBuilder(TI);
1911
1912 LoadInst *Load
1913 = PredBuilder.CreateLoad(InVal, (PN.getName() + ".sroa.speculate.load." +
1914 Pred->getName()));
1915 ++NumLoadsSpeculated;
1916 Load->setAlignment(Align);
1917 if (TBAATag)
1918 Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
1919 NewPN->addIncoming(Load, Pred);
1920
1921 Instruction *Ptr = dyn_cast<Instruction>(InVal);
1922 if (!Ptr)
1923 // No uses to rewrite.
1924 continue;
1925
1926 // Try to lookup and rewrite any partition uses corresponding to this phi
1927 // input.
1928 AllocaPartitioning::iterator PI
1929 = P.findPartitionForPHIOrSelectOperand(InUse);
1930 if (PI == P.end())
1931 continue;
1932
1933 // Replace the Use in the PartitionUse for this operand with the Use
1934 // inside the load.
1935 AllocaPartitioning::use_iterator UI
1936 = P.findPartitionUseForPHIOrSelectOperand(InUse);
1937 assert(isa<PHINode>(*UI->U->getUser()));
1938 UI->U = &Load->getOperandUse(Load->getPointerOperandIndex());
1939 }
1940 DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
1941 }
1942
1943 /// Select instructions that use an alloca and are subsequently loaded can be
1944 /// rewritten to load both input pointers and then select between the result,
1945 /// allowing the load of the alloca to be promoted.
1946 /// From this:
1947 /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
1948 /// %V = load i32* %P2
1949 /// to:
1950 /// %V1 = load i32* %Alloca -> will be mem2reg'd
1951 /// %V2 = load i32* %Other
1952 /// %V = select i1 %cond, i32 %V1, i32 %V2
1953 ///
1954 /// We can do this to a select if its only uses are loads and if the operand
1955 /// to the select can be loaded unconditionally.
1956 bool isSafeSelectToSpeculate(SelectInst &SI,
1957 SmallVectorImpl<LoadInst *> &Loads) {
1958 Value *TValue = SI.getTrueValue();
1959 Value *FValue = SI.getFalseValue();
1960 bool TDerefable = TValue->isDereferenceablePointer();
1961 bool FDerefable = FValue->isDereferenceablePointer();
1962
1963 for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end();
1964 UI != UE; ++UI) {
1965 LoadInst *LI = dyn_cast<LoadInst>(*UI);
1966 if (LI == 0 || !LI->isSimple()) return false;
1967
1968 // Both operands to the select need to be dereferenceable, either
1969 // absolutely (e.g. allocas) or at this point because we can see other
1970 // accesses to them.
1971 if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI,
1972 LI->getAlignment(), &TD))
1973 return false;
1974 if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI,
1975 LI->getAlignment(), &TD))
1976 return false;
1977 Loads.push_back(LI);
1978 }
1979
1980 return true;
1981 }
1982
1983 void visitSelectInst(SelectInst &SI) {
1984 DEBUG(dbgs() << " original: " << SI << "\n");
1985 IRBuilder<> IRB(&SI);
1986
1987 // If the select isn't safe to speculate, leave it alone; the normal rewriting will handle it.
1988 SmallVector<LoadInst *, 4> Loads;
1989 if (!isSafeSelectToSpeculate(SI, Loads))
1990 return;
1991
1992 Use *Ops[2] = { &SI.getOperandUse(1), &SI.getOperandUse(2) };
1993 AllocaPartitioning::iterator PIs[2];
1994 AllocaPartitioning::PartitionUse PUs[2];
1995 for (unsigned i = 0, e = 2; i != e; ++i) {
1996 PIs[i] = P.findPartitionForPHIOrSelectOperand(Ops[i]);
1997 if (PIs[i] != P.end()) {
1998 // If the pointer is within the partitioning, remove the select from
1999 // its uses. We'll add in the new loads below.
2000 AllocaPartitioning::use_iterator UI
2001 = P.findPartitionUseForPHIOrSelectOperand(Ops[i]);
2002 PUs[i] = *UI;
2003 P.use_erase(PIs[i], UI);
2004 }
2005 }
2006
2007 Value *TV = SI.getTrueValue();
2008 Value *FV = SI.getFalseValue();
2009 // Replace the loads of the select with a select of two loads.
2010 while (!Loads.empty()) {
2011 LoadInst *LI = Loads.pop_back_val();
2012
2013 IRB.SetInsertPoint(LI);
2014 LoadInst *TL =
2015 IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
2016 LoadInst *FL =
2017 IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
2018 NumLoadsSpeculated += 2;
2019
2020 // Transfer alignment and TBAA info if present.
2021 TL->setAlignment(LI->getAlignment());
2022 FL->setAlignment(LI->getAlignment());
2023 if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
2024 TL->setMetadata(LLVMContext::MD_tbaa, Tag);
2025 FL->setMetadata(LLVMContext::MD_tbaa, Tag);
2026 }
2027
2028 Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
2029 LI->getName() + ".sroa.speculated");
2030
2031 LoadInst *Loads[2] = { TL, FL };
2032 for (unsigned i = 0, e = 2; i != e; ++i) {
2033 if (PIs[i] != P.end()) {
2034 Use *LoadUse = &Loads[i]->getOperandUse(0);
2035 assert(PUs[i].U->get() == LoadUse->get());
2036 PUs[i].U = LoadUse;
2037 P.use_push_back(PIs[i], PUs[i]);
2038 }
2039 }
2040
2041 DEBUG(dbgs() << " speculated to: " << *V << "\n");
2042 LI->replaceAllUsesWith(V);
2043 Pass.DeadInsts.push_back(LI);
2044 }
2045 }
2046};
2047
Chandler Carruth713aa942012-09-14 09:22:59 +00002048/// \brief Visitor to rewrite instructions using a partition of an alloca to
2049/// use a new alloca.
2050///
2051/// Also implements the rewriting to vector-based accesses when the partition
2052/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
2053/// lives here.
2054class AllocaPartitionRewriter : public InstVisitor<AllocaPartitionRewriter,
2055 bool> {
2056 // Befriend the base class so it can delegate to private visit methods.
2057 friend class llvm::InstVisitor<AllocaPartitionRewriter, bool>;
2058
2059 const TargetData &TD;
2060 AllocaPartitioning &P;
2061 SROA &Pass;
2062 AllocaInst &OldAI, &NewAI;
2063 const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
2064
2065 // If we are rewriting an alloca partition which can be written as pure
2066 // vector operations, we stash extra information here. When VecTy is
2067 // non-null, we have some strict guarantees about the rewritten alloca:
2068 // - The new alloca is exactly the size of the vector type here.
2069 // - The accesses all either map to the entire vector or to a single
2070 // element.
2071 // - Each accessing instruction is one of those handled above
2072 // in isVectorPromotionViable. Generally these are the same access kinds
2073 // which are promotable via mem2reg.
2074 VectorType *VecTy;
2075 Type *ElementTy;
2076 uint64_t ElementSize;
2077
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002078 // This is a convenience and flag variable that will be null unless the new
2079 // alloca has a promotion-targeted integer type due to passing
2080 // isIntegerPromotionViable above. If it is non-null, the desired
2081 // integer type will be stored here for easy access during rewriting.
2082 IntegerType *IntPromotionTy;
2083
Chandler Carruth713aa942012-09-14 09:22:59 +00002084 // The offset of the partition user currently being rewritten.
2085 uint64_t BeginOffset, EndOffset;
Chandler Carruth77c12702012-10-01 01:49:22 +00002086 Use *OldUse;
Chandler Carruth713aa942012-09-14 09:22:59 +00002087 Instruction *OldPtr;
2088
2089 // The name prefix to use when rewriting instructions for this alloca.
2090 std::string NamePrefix;
2091
2092public:
2093 AllocaPartitionRewriter(const TargetData &TD, AllocaPartitioning &P,
2094 AllocaPartitioning::iterator PI,
2095 SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI,
2096 uint64_t NewBeginOffset, uint64_t NewEndOffset)
2097 : TD(TD), P(P), Pass(Pass),
2098 OldAI(OldAI), NewAI(NewAI),
2099 NewAllocaBeginOffset(NewBeginOffset),
2100 NewAllocaEndOffset(NewEndOffset),
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002101 VecTy(), ElementTy(), ElementSize(), IntPromotionTy(),
Chandler Carruth713aa942012-09-14 09:22:59 +00002102 BeginOffset(), EndOffset() {
2103 }
2104
2105 /// \brief Visit the users of the alloca partition and rewrite them.
2106 bool visitUsers(AllocaPartitioning::const_use_iterator I,
2107 AllocaPartitioning::const_use_iterator E) {
2108 if (isVectorPromotionViable(TD, NewAI.getAllocatedType(), P,
2109 NewAllocaBeginOffset, NewAllocaEndOffset,
2110 I, E)) {
2111 ++NumVectorized;
2112 VecTy = cast<VectorType>(NewAI.getAllocatedType());
2113 ElementTy = VecTy->getElementType();
2114 assert((VecTy->getScalarSizeInBits() % 8) == 0 &&
2115 "Only multiple-of-8 sized vector elements are viable");
2116 ElementSize = VecTy->getScalarSizeInBits() / 8;
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002117 } else if (isIntegerPromotionViable(TD, NewAI.getAllocatedType(),
2118 P, I, E)) {
2119 IntPromotionTy = cast<IntegerType>(NewAI.getAllocatedType());
Chandler Carruth713aa942012-09-14 09:22:59 +00002120 }
2121 bool CanSROA = true;
2122 for (; I != E; ++I) {
2123 BeginOffset = I->BeginOffset;
2124 EndOffset = I->EndOffset;
Chandler Carruth77c12702012-10-01 01:49:22 +00002125 OldUse = I->U;
2126 OldPtr = cast<Instruction>(I->U->get());
Chandler Carruth713aa942012-09-14 09:22:59 +00002127 NamePrefix = (Twine(NewAI.getName()) + "." + Twine(BeginOffset)).str();
Chandler Carruth77c12702012-10-01 01:49:22 +00002128 CanSROA &= visit(cast<Instruction>(I->U->getUser()));
Chandler Carruth713aa942012-09-14 09:22:59 +00002129 }
2130 if (VecTy) {
2131 assert(CanSROA);
2132 VecTy = 0;
2133 ElementTy = 0;
2134 ElementSize = 0;
2135 }
2136 return CanSROA;
2137 }
2138
2139private:
2140 // Every instruction which can end up as a user must have a rewrite rule.
2141 bool visitInstruction(Instruction &I) {
2142 DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
2143 llvm_unreachable("No rewrite rule for this instruction!");
2144 }
2145
2146 Twine getName(const Twine &Suffix) {
2147 return NamePrefix + Suffix;
2148 }
2149
2150 Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
2151 assert(BeginOffset >= NewAllocaBeginOffset);
2152 APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset);
2153 return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
2154 }
2155
Chandler Carruth673850a2012-10-01 12:16:54 +00002156 unsigned getAdjustedAlign(uint64_t Offset) {
2157 unsigned NewAIAlign = NewAI.getAlignment();
2158 if (!NewAIAlign)
2159 NewAIAlign = TD.getABITypeAlignment(NewAI.getAllocatedType());
2160 return MinAlign(NewAIAlign, Offset);
2161 }
2162 unsigned getAdjustedAlign() {
2163 return getAdjustedAlign(BeginOffset - NewAllocaBeginOffset);
2164 }
2165
2166 bool isTypeAlignSufficient(Type *Ty) {
2167 return TD.getABITypeAlignment(Ty) >= getAdjustedAlign();
2168 }
2169
Chandler Carruth713aa942012-09-14 09:22:59 +00002170 ConstantInt *getIndex(IRBuilder<> &IRB, uint64_t Offset) {
2171 assert(VecTy && "Can only call getIndex when rewriting a vector");
2172 uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2173 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
2174 uint32_t Index = RelOffset / ElementSize;
2175 assert(Index * ElementSize == RelOffset);
2176 return IRB.getInt32(Index);
2177 }
2178
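 /// \brief Extract an integer of type TargetTy from the alloca at Offset.
 ///
 /// Hedged sketch of the emitted IR (values assumed for illustration):
 /// extracting an i16 at byte offset 2 from an i64 alloca becomes roughly
 ///   %v = load i64* %new.alloca
 ///   %s = lshr i64 %v, 16
 ///   %t = trunc i64 %s to i16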
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002179 Value *extractInteger(IRBuilder<> &IRB, IntegerType *TargetTy,
2180 uint64_t Offset) {
2181 assert(IntPromotionTy && "Alloca is not an integer we can extract from");
Chandler Carruth81b001a2012-09-26 10:27:46 +00002182 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2183 getName(".load"));
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002184 assert(Offset >= NewAllocaBeginOffset && "Out of bounds offset");
2185 uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2186 if (RelOffset)
2187 V = IRB.CreateLShr(V, RelOffset*8, getName(".shift"));
2188 if (TargetTy != IntPromotionTy) {
2189 assert(TargetTy->getBitWidth() < IntPromotionTy->getBitWidth() &&
2190 "Cannot extract to a larger integer!");
2191 V = IRB.CreateTrunc(V, TargetTy, getName(".trunc"));
2192 }
2193 return V;
2194 }
2195
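 /// \brief Insert the integer V into the alloca's value at Offset.
 ///
 /// Hedged sketch of the emitted IR (values assumed for illustration):
 /// storing an i16 %v at byte offset 2 of an i64 alloca becomes roughly
 ///   %ext = zext i16 %v to i64
 ///   %shl = shl i64 %ext, 16
 ///   %old = load i64* %new.alloca
 ///   %msk = and i64 %old, 0xFFFFFFFF0000FFFF
 ///   %ins = or i64 %msk, %shl
 ///   store i64 %ins, i64* %new.alloca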
2196 StoreInst *insertInteger(IRBuilder<> &IRB, Value *V, uint64_t Offset) {
2197 IntegerType *Ty = cast<IntegerType>(V->getType());
2198 if (Ty == IntPromotionTy)
Chandler Carruth81b001a2012-09-26 10:27:46 +00002199 return IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002200
2201 assert(Ty->getBitWidth() < IntPromotionTy->getBitWidth() &&
2202 "Cannot insert a larger integer!");
2203 V = IRB.CreateZExt(V, IntPromotionTy, getName(".ext"));
2204 assert(Offset >= NewAllocaBeginOffset && "Out of bounds offset");
2205 uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2206 if (RelOffset)
2207 V = IRB.CreateShl(V, RelOffset*8, getName(".shift"));
2208
2209 APInt Mask = ~Ty->getMask().zext(IntPromotionTy->getBitWidth())
2210 .shl(RelOffset*8);
Chandler Carruth81b001a2012-09-26 10:27:46 +00002211 Value *Old = IRB.CreateAnd(IRB.CreateAlignedLoad(&NewAI,
2212 NewAI.getAlignment(),
2213 getName(".oldload")),
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002214 Mask, getName(".mask"));
Chandler Carruth81b001a2012-09-26 10:27:46 +00002215 return IRB.CreateAlignedStore(IRB.CreateOr(Old, V, getName(".insert")),
2216 &NewAI, NewAI.getAlignment());
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002217 }
2218
Chandler Carruth713aa942012-09-14 09:22:59 +00002219 void deleteIfTriviallyDead(Value *V) {
2220 Instruction *I = cast<Instruction>(V);
2221 if (isInstructionTriviallyDead(I))
2222 Pass.DeadInsts.push_back(I);
2223 }
2224
2225 Value *getValueCast(IRBuilder<> &IRB, Value *V, Type *Ty) {
2226 if (V->getType()->isIntegerTy() && Ty->isPointerTy())
2227 return IRB.CreateIntToPtr(V, Ty);
2228 if (V->getType()->isPointerTy() && Ty->isIntegerTy())
2229 return IRB.CreatePtrToInt(V, Ty);
2230
2231 return IRB.CreateBitCast(V, Ty);
2232 }
2233
2234 bool rewriteVectorizedLoadInst(IRBuilder<> &IRB, LoadInst &LI, Value *OldOp) {
2235 Value *Result;
2236 if (LI.getType() == VecTy->getElementType() ||
2237 BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
Chandler Carruth81b001a2012-09-26 10:27:46 +00002238 Result = IRB.CreateExtractElement(
2239 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")),
2240 getIndex(IRB, BeginOffset), getName(".extract"));
Chandler Carruth713aa942012-09-14 09:22:59 +00002241 } else {
Chandler Carruth81b001a2012-09-26 10:27:46 +00002242 Result = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2243 getName(".load"));
Chandler Carruth713aa942012-09-14 09:22:59 +00002244 }
2245 if (Result->getType() != LI.getType())
2246 Result = getValueCast(IRB, Result, LI.getType());
2247 LI.replaceAllUsesWith(Result);
2248 Pass.DeadInsts.push_back(&LI);
2249
2250 DEBUG(dbgs() << " to: " << *Result << "\n");
2251 return true;
2252 }
2253
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002254 bool rewriteIntegerLoad(IRBuilder<> &IRB, LoadInst &LI) {
2255 assert(!LI.isVolatile());
2256 Value *Result = extractInteger(IRB, cast<IntegerType>(LI.getType()),
2257 BeginOffset);
2258 LI.replaceAllUsesWith(Result);
2259 Pass.DeadInsts.push_back(&LI);
2260 DEBUG(dbgs() << " to: " << *Result << "\n");
2261 return true;
2262 }
2263
Chandler Carruth713aa942012-09-14 09:22:59 +00002264 bool visitLoadInst(LoadInst &LI) {
2265 DEBUG(dbgs() << " original: " << LI << "\n");
2266 Value *OldOp = LI.getOperand(0);
2267 assert(OldOp == OldPtr);
2268 IRBuilder<> IRB(&LI);
2269
2270 if (VecTy)
2271 return rewriteVectorizedLoadInst(IRB, LI, OldOp);
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002272 if (IntPromotionTy)
2273 return rewriteIntegerLoad(IRB, LI);
Chandler Carruth713aa942012-09-14 09:22:59 +00002274
2275 Value *NewPtr = getAdjustedAllocaPtr(IRB,
2276 LI.getPointerOperand()->getType());
2277 LI.setOperand(0, NewPtr);
Chandler Carruth673850a2012-10-01 12:16:54 +00002278 if (LI.getAlignment() || !isTypeAlignSufficient(LI.getType()))
2279 LI.setAlignment(getAdjustedAlign());
Chandler Carruth713aa942012-09-14 09:22:59 +00002280 DEBUG(dbgs() << " to: " << LI << "\n");
2281
2282 deleteIfTriviallyDead(OldOp);
2283 return NewPtr == &NewAI && !LI.isVolatile();
2284 }
2285
2286 bool rewriteVectorizedStoreInst(IRBuilder<> &IRB, StoreInst &SI,
2287 Value *OldOp) {
2288 Value *V = SI.getValueOperand();
2289 if (V->getType() == ElementTy ||
2290 BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
2291 if (V->getType() != ElementTy)
2292 V = getValueCast(IRB, V, ElementTy);
Chandler Carruth81b001a2012-09-26 10:27:46 +00002293 LoadInst *LI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2294 getName(".load"));
2295 V = IRB.CreateInsertElement(LI, V, getIndex(IRB, BeginOffset),
Chandler Carruth713aa942012-09-14 09:22:59 +00002296 getName(".insert"));
2297 } else if (V->getType() != VecTy) {
2298 V = getValueCast(IRB, V, VecTy);
2299 }
Chandler Carruth81b001a2012-09-26 10:27:46 +00002300 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Chandler Carruth713aa942012-09-14 09:22:59 +00002301 Pass.DeadInsts.push_back(&SI);
2302
2303 (void)Store;
2304 DEBUG(dbgs() << " to: " << *Store << "\n");
2305 return true;
2306 }
2307
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002308 bool rewriteIntegerStore(IRBuilder<> &IRB, StoreInst &SI) {
2309 assert(!SI.isVolatile());
2310 StoreInst *Store = insertInteger(IRB, SI.getValueOperand(), BeginOffset);
2311 Pass.DeadInsts.push_back(&SI);
2312 (void)Store;
2313 DEBUG(dbgs() << " to: " << *Store << "\n");
2314 return true;
2315 }
2316
Chandler Carruth713aa942012-09-14 09:22:59 +00002317 bool visitStoreInst(StoreInst &SI) {
2318 DEBUG(dbgs() << " original: " << SI << "\n");
2319 Value *OldOp = SI.getOperand(1);
2320 assert(OldOp == OldPtr);
2321 IRBuilder<> IRB(&SI);
2322
2323 if (VecTy)
2324 return rewriteVectorizedStoreInst(IRB, SI, OldOp);
Chandler Carruthbc4021f2012-09-24 00:34:20 +00002325 if (IntPromotionTy)
2326 return rewriteIntegerStore(IRB, SI);
Chandler Carruth713aa942012-09-14 09:22:59 +00002327
2328 Value *NewPtr = getAdjustedAllocaPtr(IRB,
2329 SI.getPointerOperand()->getType());
2330 SI.setOperand(1, NewPtr);
Chandler Carruth673850a2012-10-01 12:16:54 +00002331 if (SI.getAlignment() ||
2332 !isTypeAlignSufficient(SI.getValueOperand()->getType()))
2333 SI.setAlignment(getAdjustedAlign());
Chandler Carruth238fd152012-09-26 10:45:28 +00002334 if (SI.getAlignment())
2335 SI.setAlignment(MinAlign(NewAI.getAlignment(),
2336 BeginOffset - NewAllocaBeginOffset));
Chandler Carruth713aa942012-09-14 09:22:59 +00002337 DEBUG(dbgs() << " to: " << SI << "\n");
2338
2339 deleteIfTriviallyDead(OldOp);
2340 return NewPtr == &NewAI && !SI.isVolatile();
2341 }
2342
2343 bool visitMemSetInst(MemSetInst &II) {
2344 DEBUG(dbgs() << " original: " << II << "\n");
2345 IRBuilder<> IRB(&II);
2346 assert(II.getRawDest() == OldPtr);
2347
2348 // If the memset has a variable size, it cannot be split, just adjust the
2349 // pointer to the new alloca.
2350 if (!isa<Constant>(II.getLength())) {
2351 II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
Chandler Carruthd0ac06d2012-09-26 10:59:22 +00002352 Type *CstTy = II.getAlignmentCst()->getType();
Chandler Carruth673850a2012-10-01 12:16:54 +00002353 II.setAlignment(ConstantInt::get(CstTy, getAdjustedAlign()));
Chandler Carruthd0ac06d2012-09-26 10:59:22 +00002354
Chandler Carruth713aa942012-09-14 09:22:59 +00002355 deleteIfTriviallyDead(OldPtr);
2356 return false;
2357 }
2358
2359 // Record this instruction for deletion.
2360 if (Pass.DeadSplitInsts.insert(&II))
2361 Pass.DeadInsts.push_back(&II);
2362
2363 Type *AllocaTy = NewAI.getAllocatedType();
2364 Type *ScalarTy = AllocaTy->getScalarType();
2365
2366 // If this doesn't map cleanly onto the alloca type, and that type isn't
2367 // a single value type, just emit a memset.
2368 if (!VecTy && (BeginOffset != NewAllocaBeginOffset ||
2369 EndOffset != NewAllocaEndOffset ||
2370 !AllocaTy->isSingleValueType() ||
2371 !TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)))) {
2372 Type *SizeTy = II.getLength()->getType();
2373 Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
Chandler Carruth713aa942012-09-14 09:22:59 +00002374 CallInst *New
2375 = IRB.CreateMemSet(getAdjustedAllocaPtr(IRB,
2376 II.getRawDest()->getType()),
Chandler Carruth673850a2012-10-01 12:16:54 +00002377 II.getValue(), Size, getAdjustedAlign(),
Chandler Carruth713aa942012-09-14 09:22:59 +00002378 II.isVolatile());
2379 (void)New;
2380 DEBUG(dbgs() << " to: " << *New << "\n");
2381 return false;
2382 }
2383
2384 // If we can represent this as a simple value, we have to build the actual
2385 // value to store, which requires expanding the byte present in memset to
2386 // a sensible representation for the alloca type. This is essentially
2387 // splatting the byte to a sufficiently wide integer, bitcasting to the
2388 // desired scalar type, and splatting it across any desired vector type.
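    // For example (illustrative, not from the original comment): a memset of
    // the byte 0xAB onto an i32-typed alloca computes
    //   0xAB * (0xFFFFFFFF / 0xFF) = 0xAB * 0x01010101 = 0xABABABAB
    // which is then bitcast or splatted to the final scalar or vector type.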
2389 Value *V = II.getValue();
2390 IntegerType *VTy = cast<IntegerType>(V->getType());
2391 Type *IntTy = Type::getIntNTy(VTy->getContext(),
2392 TD.getTypeSizeInBits(ScalarTy));
2393 if (TD.getTypeSizeInBits(ScalarTy) > VTy->getBitWidth())
2394 V = IRB.CreateMul(IRB.CreateZExt(V, IntTy, getName(".zext")),
2395 ConstantExpr::getUDiv(
2396 Constant::getAllOnesValue(IntTy),
2397 ConstantExpr::getZExt(
2398 Constant::getAllOnesValue(V->getType()),
2399 IntTy)),
2400 getName(".isplat"));
2401 if (V->getType() != ScalarTy) {
2402 if (ScalarTy->isPointerTy())
2403 V = IRB.CreateIntToPtr(V, ScalarTy);
2404 else if (ScalarTy->isPrimitiveType() || ScalarTy->isVectorTy())
2405 V = IRB.CreateBitCast(V, ScalarTy);
2406 else if (ScalarTy->isIntegerTy())
2407 llvm_unreachable("Computed different integer types with equal widths");
2408 else
2409 llvm_unreachable("Invalid scalar type");
2410 }
2411
2412 // If this is an element-wide memset of a vectorizable alloca, insert it.
2413 if (VecTy && (BeginOffset > NewAllocaBeginOffset ||
2414 EndOffset < NewAllocaEndOffset)) {
Chandler Carruth81b001a2012-09-26 10:27:46 +00002415 StoreInst *Store = IRB.CreateAlignedStore(
2416 IRB.CreateInsertElement(IRB.CreateAlignedLoad(&NewAI,
2417 NewAI.getAlignment(),
2418 getName(".load")),
2419 V, getIndex(IRB, BeginOffset),
Chandler Carruth713aa942012-09-14 09:22:59 +00002420 getName(".insert")),
Chandler Carruth81b001a2012-09-26 10:27:46 +00002421 &NewAI, NewAI.getAlignment());
Chandler Carruth713aa942012-09-14 09:22:59 +00002422 (void)Store;
2423 DEBUG(dbgs() << " to: " << *Store << "\n");
2424 return true;
2425 }
2426
2427 // Splat to a vector if needed.
2428 if (VectorType *VecTy = dyn_cast<VectorType>(AllocaTy)) {
2429 VectorType *SplatSourceTy = VectorType::get(V->getType(), 1);
2430 V = IRB.CreateShuffleVector(
2431 IRB.CreateInsertElement(UndefValue::get(SplatSourceTy), V,
2432 IRB.getInt32(0), getName(".vsplat.insert")),
2433 UndefValue::get(SplatSourceTy),
2434 ConstantVector::getSplat(VecTy->getNumElements(), IRB.getInt32(0)),
2435 getName(".vsplat.shuffle"));
2436 assert(V->getType() == VecTy);
2437 }
2438
Chandler Carruth81b001a2012-09-26 10:27:46 +00002439 Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2440 II.isVolatile());
Chandler Carruth713aa942012-09-14 09:22:59 +00002441 (void)New;
2442 DEBUG(dbgs() << " to: " << *New << "\n");
2443 return !II.isVolatile();
2444 }
2445
2446 bool visitMemTransferInst(MemTransferInst &II) {
2447 // Rewriting of memory transfer instructions can be a bit tricky. We break
2448 // them into two categories: split intrinsics and unsplit intrinsics.
2449
2450 DEBUG(dbgs() << " original: " << II << "\n");
2451 IRBuilder<> IRB(&II);
2452
2453 assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr);
2454 bool IsDest = II.getRawDest() == OldPtr;
2455
2456 const AllocaPartitioning::MemTransferOffsets &MTO
2457 = P.getMemTransferOffsets(II);
2458
Chandler Carruth673850a2012-10-01 12:16:54 +00002459 // Compute the relative offset within the transfer.
2460 unsigned IntPtrWidth = TD.getPointerSizeInBits();
2461 APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
2462 : MTO.SourceBegin));
2463
2464 unsigned Align = II.getAlignment();
2465 if (Align > 1)
2466 Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
2467 MinAlign(II.getAlignment(), getAdjustedAlign()));
2468
Chandler Carruth713aa942012-09-14 09:22:59 +00002469 // For unsplit intrinsics, we simply modify the source and destination
2470 // pointers in place. This isn't just an optimization, it is a matter of
2471 // correctness. With unsplit intrinsics we may be dealing with transfers
2472 // within a single alloca before SROA ran, or with transfers that have
2473 // a variable length. We may also be dealing with memmove instead of
2474 // memcpy, and so simply updating the pointers is all that is necessary to
2475 // update both source and dest of a single call.
2476 if (!MTO.IsSplittable) {
2477 Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
2478 if (IsDest)
2479 II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
2480 else
2481 II.setSource(getAdjustedAllocaPtr(IRB, II.getRawSource()->getType()));
2482
Chandler Carruthd0ac06d2012-09-26 10:59:22 +00002483 Type *CstTy = II.getAlignmentCst()->getType();
Chandler Carruth673850a2012-10-01 12:16:54 +00002484 II.setAlignment(ConstantInt::get(CstTy, Align));
Chandler Carruthd0ac06d2012-09-26 10:59:22 +00002485
Chandler Carruth713aa942012-09-14 09:22:59 +00002486 DEBUG(dbgs() << " to: " << II << "\n");
2487 deleteIfTriviallyDead(OldOp);
2488 return false;
2489 }
2490 // For split transfer intrinsics we have an incredibly useful assurance:
2491 // the source and destination do not reside within the same alloca, and at
2492 // least one of them does not escape. This means that we can replace
2493 // memmove with memcpy, and we don't need to worry about all manner of
2494 // downsides to splitting and transforming the operations.
2495
Chandler Carruth713aa942012-09-14 09:22:59 +00002496 // If this doesn't map cleanly onto the alloca type, and that type isn't
2497 // a single value type, just emit a memcpy.
2498 bool EmitMemCpy
2499 = !VecTy && (BeginOffset != NewAllocaBeginOffset ||
2500 EndOffset != NewAllocaEndOffset ||
2501 !NewAI.getAllocatedType()->isSingleValueType());
2502
2503 // If we're just going to emit a memcpy, the alloca hasn't changed, and the
2504 // size hasn't been shrunk based on analysis of the viable range, this is
2505 // a no-op.
2506 if (EmitMemCpy && &OldAI == &NewAI) {
2507 uint64_t OrigBegin = IsDest ? MTO.DestBegin : MTO.SourceBegin;
2508 uint64_t OrigEnd = IsDest ? MTO.DestEnd : MTO.SourceEnd;
2509 // Ensure the start lines up.
2510 assert(BeginOffset == OrigBegin);
Benjamin Kramerd0807692012-09-14 13:08:09 +00002511 (void)OrigBegin;
Chandler Carruth713aa942012-09-14 09:22:59 +00002512
2513 // Rewrite the size as needed.
2514 if (EndOffset != OrigEnd)
2515 II.setLength(ConstantInt::get(II.getLength()->getType(),
2516 EndOffset - BeginOffset));
2517 return false;
2518 }
2519 // Record this instruction for deletion.
2520 if (Pass.DeadSplitInsts.insert(&II))
2521 Pass.DeadInsts.push_back(&II);
2522
2523 bool IsVectorElement = VecTy && (BeginOffset > NewAllocaBeginOffset ||
2524 EndOffset < NewAllocaEndOffset);
2525
2526 Type *OtherPtrTy = IsDest ? II.getRawSource()->getType()
2527 : II.getRawDest()->getType();
2528 if (!EmitMemCpy)
2529 OtherPtrTy = IsVectorElement ? VecTy->getElementType()->getPointerTo()
2530 : NewAI.getType();
2531
2532 // Compute the other pointer, folding as much as possible to produce
2533 // a single, simple GEP in most cases.
2534 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
2535 OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy,
2536 getName("." + OtherPtr->getName()));
2537
2538 // Strip all inbounds GEPs and pointer casts to try to dig out any root
2539 // alloca that should be re-examined after rewriting this instruction.
2540 if (AllocaInst *AI
2541 = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets()))
Chandler Carruthb3dca3f2012-09-26 07:41:40 +00002542 Pass.Worklist.insert(AI);
Chandler Carruth713aa942012-09-14 09:22:59 +00002543
2544 if (EmitMemCpy) {
2545 Value *OurPtr
2546 = getAdjustedAllocaPtr(IRB, IsDest ? II.getRawDest()->getType()
2547 : II.getRawSource()->getType());
2548 Type *SizeTy = II.getLength()->getType();
2549 Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
2550
2551 CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
2552 IsDest ? OtherPtr : OurPtr,
Chandler Carruth81b001a2012-09-26 10:27:46 +00002553 Size, Align, II.isVolatile());
Chandler Carruth713aa942012-09-14 09:22:59 +00002554 (void)New;
2555 DEBUG(dbgs() << " to: " << *New << "\n");
2556 return false;
2557 }
2558
2559 Value *SrcPtr = OtherPtr;
2560 Value *DstPtr = &NewAI;
2561 if (!IsDest)
2562 std::swap(SrcPtr, DstPtr);
2563
2564 Value *Src;
2565 if (IsVectorElement && !IsDest) {
2566 // We have to extract rather than load.
Chandler Carruth81b001a2012-09-26 10:27:46 +00002567 Src = IRB.CreateExtractElement(
2568 IRB.CreateAlignedLoad(SrcPtr, Align, getName(".copyload")),
2569 getIndex(IRB, BeginOffset),
2570 getName(".copyextract"));
Chandler Carruth713aa942012-09-14 09:22:59 +00002571 } else {
Chandler Carruth81b001a2012-09-26 10:27:46 +00002572 Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
2573 getName(".copyload"));
Chandler Carruth713aa942012-09-14 09:22:59 +00002574 }
2575
2576 if (IsVectorElement && IsDest) {
2577 // We have to insert into a loaded copy before storing.
Chandler Carruth81b001a2012-09-26 10:27:46 +00002578 Src = IRB.CreateInsertElement(
2579 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")),
2580 Src, getIndex(IRB, BeginOffset),
2581 getName(".insert"));
Chandler Carruth713aa942012-09-14 09:22:59 +00002582 }
2583
Chandler Carruth81b001a2012-09-26 10:27:46 +00002584 StoreInst *Store = cast<StoreInst>(
2585 IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
2586 (void)Store;
Chandler Carruth713aa942012-09-14 09:22:59 +00002587 DEBUG(dbgs() << " to: " << *Store << "\n");
2588 return !II.isVolatile();
2589 }
2590
2591 bool visitIntrinsicInst(IntrinsicInst &II) {
2592 assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
2593 II.getIntrinsicID() == Intrinsic::lifetime_end);
2594 DEBUG(dbgs() << " original: " << II << "\n");
2595 IRBuilder<> IRB(&II);
2596 assert(II.getArgOperand(1) == OldPtr);
2597
2598 // Record this instruction for deletion.
2599 if (Pass.DeadSplitInsts.insert(&II))
2600 Pass.DeadInsts.push_back(&II);
2601
2602 ConstantInt *Size
2603 = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
2604 EndOffset - BeginOffset);
2605 Value *Ptr = getAdjustedAllocaPtr(IRB, II.getArgOperand(1)->getType());
2606 Value *New;
2607 if (II.getIntrinsicID() == Intrinsic::lifetime_start)
2608 New = IRB.CreateLifetimeStart(Ptr, Size);
2609 else
2610 New = IRB.CreateLifetimeEnd(Ptr, Size);
2611
2612 DEBUG(dbgs() << " to: " << *New << "\n");
2613 return true;
2614 }
2615
Chandler Carruth713aa942012-09-14 09:22:59 +00002616 bool visitPHINode(PHINode &PN) {
2617 DEBUG(dbgs() << " original: " << PN << "\n");
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00002618
Chandler Carruth713aa942012-09-14 09:22:59 +00002619 // We would like to compute a new pointer in only one place, but have it be
2620 // as local as possible to the PHI. To do that, we re-use the location of
2621 // the old pointer, which necessarily must be in the right position to
2622 // dominate the PHI.
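    // For example (illustrative), if the old pointer is a GEP computed in a
    // predecessor block, the adjusted pointer is emitted at that GEP's
    // position and therefore satisfies the same dominance requirements the
    // old pointer did.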
2623 IRBuilder<> PtrBuilder(cast<Instruction>(OldPtr));
2624
Chandler Carruth713aa942012-09-14 09:22:59 +00002625 Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType());
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00002626 // Replace the operands which were using the old pointer.
2627 User::op_iterator OI = PN.op_begin(), OE = PN.op_end();
2628 for (; OI != OE; ++OI)
2629 if (*OI == OldPtr)
2630 *OI = NewPtr;
Chandler Carruth713aa942012-09-14 09:22:59 +00002631
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00002632 DEBUG(dbgs() << " to: " << PN << "\n");
2633 deleteIfTriviallyDead(OldPtr);
2634 return false;
Chandler Carruth713aa942012-09-14 09:22:59 +00002635 }
2636
2637 bool visitSelectInst(SelectInst &SI) {
2638 DEBUG(dbgs() << " original: " << SI << "\n");
2639 IRBuilder<> IRB(&SI);
2640
2641 // Find the operand we need to rewrite here.
2642 bool IsTrueVal = SI.getTrueValue() == OldPtr;
2643 if (IsTrueVal)
2644 assert(SI.getFalseValue() != OldPtr && "Pointer is both operands!");
2645 else
2646 assert(SI.getFalseValue() == OldPtr && "Pointer isn't an operand!");
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00002647
Chandler Carruth713aa942012-09-14 09:22:59 +00002648 Value *NewPtr = getAdjustedAllocaPtr(IRB, OldPtr->getType());
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00002649 SI.setOperand(IsTrueVal ? 1 : 2, NewPtr);
2650 DEBUG(dbgs() << " to: " << SI << "\n");
Chandler Carruth713aa942012-09-14 09:22:59 +00002651 deleteIfTriviallyDead(OldPtr);
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00002652 return false;
Chandler Carruth713aa942012-09-14 09:22:59 +00002653 }
2654
2655};
2656}
2657
Chandler Carruthc370acd2012-09-18 12:57:43 +00002658namespace {
2659/// \brief Visitor to rewrite aggregate loads and stores as scalar.
2660///
2661 /// This visitor aggressively rewrites all aggregate loads and stores on
2662/// a particular pointer (or any pointer derived from it which we can identify)
2663/// with scalar loads and stores.
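/// For example (illustrative IR only; value names are approximate), a
/// first-class aggregate load such as
///   %v = load { i32, float }* %p
/// is rewritten roughly as
///   %v.fca.0.gep = getelementptr inbounds { i32, float }* %p, i32 0, i32 0
///   %v.fca.0.load = load i32* %v.fca.0.gep
///   %v.fca.0.insert = insertvalue { i32, float } undef, i32 %v.fca.0.load, 0
///   %v.fca.1.gep = getelementptr inbounds { i32, float }* %p, i32 0, i32 1
///   %v.fca.1.load = load float* %v.fca.1.gep
///   %v.fca.1.insert = insertvalue { i32, float } %v.fca.0.insert,
///                                 float %v.fca.1.load, 1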
2664class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
2665 // Befriend the base class so it can delegate to private visit methods.
2666 friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
2667
2668 const TargetData &TD;
2669
2670 /// Queue of pointer uses to analyze and potentially rewrite.
2671 SmallVector<Use *, 8> Queue;
2672
2673 /// Set to prevent us from cycling with phi nodes and loops.
2674 SmallPtrSet<User *, 8> Visited;
2675
2676 /// The current pointer use being rewritten. This is used to dig up the used
2677 /// value (as opposed to the user).
2678 Use *U;
2679
2680public:
2681 AggLoadStoreRewriter(const TargetData &TD) : TD(TD) {}
2682
2683 /// Rewrite loads and stores through a pointer and all pointers derived from
2684 /// it.
2685 bool rewrite(Instruction &I) {
2686 DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
2687 enqueueUsers(I);
2688 bool Changed = false;
2689 while (!Queue.empty()) {
2690 U = Queue.pop_back_val();
2691 Changed |= visit(cast<Instruction>(U->getUser()));
2692 }
2693 return Changed;
2694 }
2695
2696private:
2697 /// Enqueue all the users of the given instruction for further processing.
2698 /// This uses a set to de-duplicate users.
2699 void enqueueUsers(Instruction &I) {
2700 for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
2701 ++UI)
2702 if (Visited.insert(*UI))
2703 Queue.push_back(&UI.getUse());
2704 }
2705
2706 // Conservative default is to not rewrite anything.
2707 bool visitInstruction(Instruction &I) { return false; }
2708
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002709 /// \brief Generic recursive split emission class.
Benjamin Kramer371d5d82012-09-18 17:06:32 +00002710 template <typename Derived>
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002711 class OpSplitter {
2712 protected:
2713 /// The builder used to form new instructions.
2714 IRBuilder<> IRB;
2715 /// The indices to be used with insertvalue or extractvalue to select the
2716 /// appropriate value within the aggregate.
2717 SmallVector<unsigned, 4> Indices;
2718 /// The indices to a GEP instruction which will move Ptr to the correct slot
2719 /// within the aggregate.
2720 SmallVector<Value *, 4> GEPIndices;
2721 /// The base pointer of the original op, used as a base for GEPing the
2722 /// split operations.
2723 Value *Ptr;
Chandler Carruthc370acd2012-09-18 12:57:43 +00002724
Benjamin Kramer6e67b252012-09-18 17:06:32 +00002725 /// Initialize the splitter with an insertion point and Ptr, and start with
2726 /// a single zero GEP index.
2727 OpSplitter(Instruction *InsertionPoint, Value *Ptr)
Benjamin Kramer371d5d82012-09-18 17:06:32 +00002728 : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002729
2730 public:
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002731 /// \brief Generic recursive split emission routine.
2732 ///
2733 /// This method recursively splits an aggregate op (load or store) into
2734 /// scalar or vector ops. It recurses until it reaches a single value type
2735 /// and emits the operation for that leaf via the derived class's emitFunc.
2736 ///
2737 /// The logic of this routine relies on GEPs and insertvalue and
2738 /// extractvalue all operating with the same fundamental index list, merely
2739 /// formatted differently (GEPs need actual values).
2740 ///
2741 /// \param Ty The type being split recursively into smaller ops.
2742 /// \param Agg The aggregate value being built up or stored, depending on
2743 /// whether this is splitting a load or a store respectively.
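    /// For example (illustrative), when splitting a value of type
    /// { i32, [2 x float] }, the leaf for the second array element is reached
    /// with Indices == {1, 1} and GEPIndices == {i32 0, i32 1, i32 1}; the
    /// extra leading zero is the pointer-level index every GEP requires.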
2744 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
2745 if (Ty->isSingleValueType())
Benjamin Kramer371d5d82012-09-18 17:06:32 +00002746 return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002747
2748 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2749 unsigned OldSize = Indices.size();
2750 (void)OldSize;
2751 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
2752 ++Idx) {
2753 assert(Indices.size() == OldSize && "Did not return to the old size");
2754 Indices.push_back(Idx);
2755 GEPIndices.push_back(IRB.getInt32(Idx));
2756 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
2757 GEPIndices.pop_back();
2758 Indices.pop_back();
2759 }
2760 return;
Chandler Carruthc370acd2012-09-18 12:57:43 +00002761 }
Chandler Carruthc370acd2012-09-18 12:57:43 +00002762
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002763 if (StructType *STy = dyn_cast<StructType>(Ty)) {
2764 unsigned OldSize = Indices.size();
2765 (void)OldSize;
2766 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
2767 ++Idx) {
2768 assert(Indices.size() == OldSize && "Did not return to the old size");
2769 Indices.push_back(Idx);
2770 GEPIndices.push_back(IRB.getInt32(Idx));
2771 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
2772 GEPIndices.pop_back();
2773 Indices.pop_back();
2774 }
2775 return;
Chandler Carruthc370acd2012-09-18 12:57:43 +00002776 }
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002777
2778 llvm_unreachable("Only arrays and structs are aggregate loadable types");
Chandler Carruthc370acd2012-09-18 12:57:43 +00002779 }
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002780 };
Chandler Carruthc370acd2012-09-18 12:57:43 +00002781
Benjamin Kramer371d5d82012-09-18 17:06:32 +00002782 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002783 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr)
Benjamin Kramer3b682bd2012-09-18 17:11:47 +00002784 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {}
Chandler Carruthc370acd2012-09-18 12:57:43 +00002785
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002786 /// Emit a leaf load of a single value. This is called at the leaves of the
2787 /// recursive emission to actually load values.
Benjamin Kramer371d5d82012-09-18 17:06:32 +00002788 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002789 assert(Ty->isSingleValueType());
2790 // Load the single value and insert it using the indices.
2791 Value *Load = IRB.CreateLoad(IRB.CreateInBoundsGEP(Ptr, GEPIndices,
2792 Name + ".gep"),
2793 Name + ".load");
2794 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
2795 DEBUG(dbgs() << " to: " << *Load << "\n");
2796 }
2797 };
Chandler Carruthc370acd2012-09-18 12:57:43 +00002798
2799 bool visitLoadInst(LoadInst &LI) {
2800 assert(LI.getPointerOperand() == *U);
2801 if (!LI.isSimple() || LI.getType()->isSingleValueType())
2802 return false;
2803
2804 // We have an aggregate being loaded, split it apart.
2805 DEBUG(dbgs() << " original: " << LI << "\n");
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002806 LoadOpSplitter Splitter(&LI, *U);
Chandler Carruthc370acd2012-09-18 12:57:43 +00002807 Value *V = UndefValue::get(LI.getType());
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002808 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
Chandler Carruthc370acd2012-09-18 12:57:43 +00002809 LI.replaceAllUsesWith(V);
2810 LI.eraseFromParent();
2811 return true;
2812 }
2813
Benjamin Kramer371d5d82012-09-18 17:06:32 +00002814 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002815 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr)
Benjamin Kramer3b682bd2012-09-18 17:11:47 +00002816 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {}
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002817
2818 /// Emit a leaf store of a single value. This is called at the leaves of the
2819 /// recursive emission to actually produce stores.
Benjamin Kramer371d5d82012-09-18 17:06:32 +00002820 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002821 assert(Ty->isSingleValueType());
2822 // Extract the single value and store it using the indices.
2823 Value *Store = IRB.CreateStore(
2824 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"),
2825 IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"));
2826 (void)Store;
2827 DEBUG(dbgs() << " to: " << *Store << "\n");
2828 }
2829 };
Chandler Carruthc370acd2012-09-18 12:57:43 +00002830
2831 bool visitStoreInst(StoreInst &SI) {
2832 if (!SI.isSimple() || SI.getPointerOperand() != *U)
2833 return false;
2834 Value *V = SI.getValueOperand();
2835 if (V->getType()->isSingleValueType())
2836 return false;
2837
2838 // We have an aggregate being stored, split it apart.
2839 DEBUG(dbgs() << " original: " << SI << "\n");
Benjamin Kramer6e67b252012-09-18 16:20:46 +00002840 StoreOpSplitter Splitter(&SI, *U);
2841 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
Chandler Carruthc370acd2012-09-18 12:57:43 +00002842 SI.eraseFromParent();
2843 return true;
2844 }
2845
2846 bool visitBitCastInst(BitCastInst &BC) {
2847 enqueueUsers(BC);
2848 return false;
2849 }
2850
2851 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
2852 enqueueUsers(GEPI);
2853 return false;
2854 }
2855
2856 bool visitPHINode(PHINode &PN) {
2857 enqueueUsers(PN);
2858 return false;
2859 }
2860
2861 bool visitSelectInst(SelectInst &SI) {
2862 enqueueUsers(SI);
2863 return false;
2864 }
2865};
2866}
2867
Chandler Carruth713aa942012-09-14 09:22:59 +00002868/// \brief Try to find a partition of the aggregate type passed in for a given
2869/// offset and size.
2870///
2871/// This recurses through the aggregate type and tries to compute a subtype
2872/// based on the offset and size. When the offset and size span a sub-section
Chandler Carruth6b547a22012-09-14 11:08:31 +00002873/// of an array, it will even compute a new array type for that sub-section,
2874/// and the same for structs.
2875///
2876/// Note that this routine is very strict and tries to find a partition of the
2877/// type which produces the *exact* right offset and size. It is not forgiving
2878 /// when the size or offset causes either end of the type-based partition to
2879 /// be off. Also, this is a best-effort routine: it is reasonable to give up
2880 /// and not return a type if necessary.
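///
/// For example (illustrative, assuming no surprising padding): partitioning
/// { i32, { i32, i32 }, float } at offset 4 with size 8 yields the inner
/// { i32, i32 }, offset 4 with size 4 yields i32, and offset 4 with size 6
/// yields no type at all.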
Chandler Carruth713aa942012-09-14 09:22:59 +00002881static Type *getTypePartition(const TargetData &TD, Type *Ty,
2882 uint64_t Offset, uint64_t Size) {
2883 if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
2884 return Ty;
2885
2886 if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
2887 // We can't partition pointers...
2888 if (SeqTy->isPointerTy())
2889 return 0;
2890
2891 Type *ElementTy = SeqTy->getElementType();
2892 uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
2893 uint64_t NumSkippedElements = Offset / ElementSize;
2894 if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy))
2895 if (NumSkippedElements >= ArrTy->getNumElements())
2896 return 0;
2897 if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy))
2898 if (NumSkippedElements >= VecTy->getNumElements())
2899 return 0;
2900 Offset -= NumSkippedElements * ElementSize;
2901
2902 // First check if we need to recurse.
2903 if (Offset > 0 || Size < ElementSize) {
2904 // Bail if the partition ends in a different array element.
2905 if ((Offset + Size) > ElementSize)
2906 return 0;
2907 // Recurse through the element type trying to peel off offset bytes.
2908 return getTypePartition(TD, ElementTy, Offset, Size);
2909 }
2910 assert(Offset == 0);
2911
2912 if (Size == ElementSize)
2913 return ElementTy;
2914 assert(Size > ElementSize);
2915 uint64_t NumElements = Size / ElementSize;
2916 if (NumElements * ElementSize != Size)
2917 return 0;
2918 return ArrayType::get(ElementTy, NumElements);
2919 }
2920
2921 StructType *STy = dyn_cast<StructType>(Ty);
2922 if (!STy)
2923 return 0;
2924
2925 const StructLayout *SL = TD.getStructLayout(STy);
Chandler Carruth6b547a22012-09-14 11:08:31 +00002926 if (Offset >= SL->getSizeInBytes())
Chandler Carruth713aa942012-09-14 09:22:59 +00002927 return 0;
2928 uint64_t EndOffset = Offset + Size;
2929 if (EndOffset > SL->getSizeInBytes())
2930 return 0;
2931
2932 unsigned Index = SL->getElementContainingOffset(Offset);
Chandler Carruth713aa942012-09-14 09:22:59 +00002933 Offset -= SL->getElementOffset(Index);
2934
2935 Type *ElementTy = STy->getElementType(Index);
2936 uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
2937 if (Offset >= ElementSize)
2938 return 0; // The offset points into alignment padding.
2939
2940 // See whether the partition must be contained within this single element.
2941 if (Offset > 0 || Size < ElementSize) {
2942 if ((Offset + Size) > ElementSize)
2943 return 0;
Chandler Carruth713aa942012-09-14 09:22:59 +00002944 return getTypePartition(TD, ElementTy, Offset, Size);
2945 }
2946 assert(Offset == 0);
2947
2948 if (Size == ElementSize)
2949 return ElementTy;
2950
2951 StructType::element_iterator EI = STy->element_begin() + Index,
2952 EE = STy->element_end();
2953 if (EndOffset < SL->getSizeInBytes()) {
2954 unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
2955 if (Index == EndIndex)
2956 return 0; // Within a single element and its padding.
Chandler Carruth6b547a22012-09-14 11:08:31 +00002957
2958 // Don't try to form "natural" types if the elements don't line up with the
2959 // expected size.
2960 // FIXME: We could potentially recurse down through the last element in the
2961 // sub-struct to find a natural end point.
2962 if (SL->getElementOffset(EndIndex) != EndOffset)
2963 return 0;
2964
Chandler Carruth713aa942012-09-14 09:22:59 +00002965 assert(Index < EndIndex);
Chandler Carruth713aa942012-09-14 09:22:59 +00002966 EE = STy->element_begin() + EndIndex;
2967 }
2968
2969 // Try to build up a sub-structure.
2970 SmallVector<Type *, 4> ElementTys;
2971 do {
2972 ElementTys.push_back(*EI++);
2973 } while (EI != EE);
2974 StructType *SubTy = StructType::get(STy->getContext(), ElementTys,
2975 STy->isPacked());
2976 const StructLayout *SubSL = TD.getStructLayout(SubTy);
Chandler Carruth6b547a22012-09-14 11:08:31 +00002977 if (Size != SubSL->getSizeInBytes())
2978 return 0; // The sub-struct doesn't have quite the size needed.
Chandler Carruth713aa942012-09-14 09:22:59 +00002979
Chandler Carruth6b547a22012-09-14 11:08:31 +00002980 return SubTy;
Chandler Carruth713aa942012-09-14 09:22:59 +00002981}
2982
2983/// \brief Rewrite an alloca partition's users.
2984///
2985/// This routine drives both of the rewriting goals of the SROA pass. It tries
2986 /// to rewrite uses of an alloca partition to be conducive to SSA value
2987/// promotion. If the partition needs a new, more refined alloca, this will
2988/// build that new alloca, preserving as much type information as possible, and
2989/// rewrite the uses of the old alloca to point at the new one and have the
2990/// appropriate new offsets. It also evaluates how successful the rewrite was
2991 /// at enabling promotion and, if it was successful, queues the alloca to be
2992/// promoted.
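///
/// For example (illustrative), an alloca of { i32, i32 } whose two halves are
/// only ever loaded and stored independently is rewritten as two separate i32
/// allocas, each of which can then be promoted to an SSA value.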
2993bool SROA::rewriteAllocaPartition(AllocaInst &AI,
2994 AllocaPartitioning &P,
2995 AllocaPartitioning::iterator PI) {
2996 uint64_t AllocaSize = PI->EndOffset - PI->BeginOffset;
2997 if (P.use_begin(PI) == P.use_end(PI))
2998 return false; // No live uses left of this partition.
2999
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00003000 DEBUG(dbgs() << "Speculating PHIs and selects in partition "
3001 << "[" << PI->BeginOffset << "," << PI->EndOffset << ")\n");
3002
3003 PHIOrSelectSpeculator Speculator(*TD, P, *this);
3004 DEBUG(dbgs() << " speculating ");
3005 DEBUG(P.print(dbgs(), PI, ""));
Chandler Carrutha346f462012-10-02 17:49:47 +00003006 Speculator.visitUsers(PI);
Chandler Carruth1e1b16c2012-10-01 10:54:05 +00003007
Chandler Carruth713aa942012-09-14 09:22:59 +00003008 // Try to compute a friendly type for this partition of the alloca. This
3009 // won't always succeed, in which case we fall back to a legal integer type
3010 // or an i8 array of an appropriate size.
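  // For example (illustrative): a 4-byte partition with neither a common use
  // type nor a natural sub-type of the original alloca becomes i32 on targets
  // where i32 is a legal integer type, and [4 x i8] otherwise.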
3011 Type *AllocaTy = 0;
3012 if (Type *PartitionTy = P.getCommonType(PI))
3013 if (TD->getTypeAllocSize(PartitionTy) >= AllocaSize)
3014 AllocaTy = PartitionTy;
3015 if (!AllocaTy)
3016 if (Type *PartitionTy = getTypePartition(*TD, AI.getAllocatedType(),
3017 PI->BeginOffset, AllocaSize))
3018 AllocaTy = PartitionTy;
3019 if ((!AllocaTy ||
3020 (AllocaTy->isArrayTy() &&
3021 AllocaTy->getArrayElementType()->isIntegerTy())) &&
3022 TD->isLegalInteger(AllocaSize * 8))
3023 AllocaTy = Type::getIntNTy(*C, AllocaSize * 8);
3024 if (!AllocaTy)
3025 AllocaTy = ArrayType::get(Type::getInt8Ty(*C), AllocaSize);
Chandler Carruthb3dd9a12012-09-14 10:26:34 +00003026 assert(TD->getTypeAllocSize(AllocaTy) >= AllocaSize);
Chandler Carruth713aa942012-09-14 09:22:59 +00003027
3028 // Check for the case where we're going to rewrite to a new alloca of the
3029 // exact same type as the original, and with the same access offsets. In that
3030 // case, re-use the existing alloca, but still run through the rewriter to
3031 // perform phi and select speculation.
3032 AllocaInst *NewAI;
3033 if (AllocaTy == AI.getAllocatedType()) {
3034 assert(PI->BeginOffset == 0 &&
3035 "Non-zero begin offset but same alloca type");
3036 assert(PI == P.begin() && "Begin offset is zero on later partition");
3037 NewAI = &AI;
3038 } else {
Chandler Carruthb67c9a52012-09-29 10:41:21 +00003039 unsigned Alignment = AI.getAlignment();
3040 if (!Alignment) {
3041 // The minimum alignment which users can rely on when the explicit
3042 // alignment is omitted or zero is that required by the ABI for this
3043 // type.
3044 Alignment = TD->getABITypeAlignment(AI.getAllocatedType());
3045 }
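    // For example (illustrative): splitting a 16-byte-aligned alloca at byte
    // offset 8 leaves MinAlign(16, 8) == 8 bytes of alignment that the new,
    // smaller alloca can still guarantee.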
3046 Alignment = MinAlign(Alignment, PI->BeginOffset);
3047 // If we will get at least this much alignment from the type alone, leave
3048 // the alloca's alignment unconstrained.
3049 if (Alignment <= TD->getABITypeAlignment(AllocaTy))
3050 Alignment = 0;
3051 NewAI = new AllocaInst(AllocaTy, 0, Alignment,
Chandler Carruth713aa942012-09-14 09:22:59 +00003052 AI.getName() + ".sroa." + Twine(PI - P.begin()),
3053 &AI);
3054 ++NumNewAllocas;
3055 }
3056
3057 DEBUG(dbgs() << "Rewriting alloca partition "
3058 << "[" << PI->BeginOffset << "," << PI->EndOffset << ") to: "
3059 << *NewAI << "\n");
3060
3061 AllocaPartitionRewriter Rewriter(*TD, P, PI, *this, AI, *NewAI,
3062 PI->BeginOffset, PI->EndOffset);
3063 DEBUG(dbgs() << " rewriting ");
3064 DEBUG(P.print(dbgs(), PI, ""));
3065 if (Rewriter.visitUsers(P.use_begin(PI), P.use_end(PI))) {
3066 DEBUG(dbgs() << " and queuing for promotion\n");
3067 PromotableAllocas.push_back(NewAI);
3068 } else if (NewAI != &AI) {
3069 // If we can't promote the alloca, iterate on it to check for new
3070 // refinements exposed by splitting the current alloca. Don't iterate on an
3071 // alloca which didn't actually change and didn't get promoted.
3072 Worklist.insert(NewAI);
3073 }
3074 return true;
3075}
3076
3077 /// \brief Walks the partitioning of an alloca, rewriting the uses of each partition.
3078bool SROA::splitAlloca(AllocaInst &AI, AllocaPartitioning &P) {
3079 bool Changed = false;
3080 for (AllocaPartitioning::iterator PI = P.begin(), PE = P.end(); PI != PE;
3081 ++PI)
3082 Changed |= rewriteAllocaPartition(AI, P, PI);
3083
3084 return Changed;
3085}
3086
3087/// \brief Analyze an alloca for SROA.
3088///
3089/// This analyzes the alloca to ensure we can reason about it, builds
3090/// a partitioning of the alloca, and then hands it off to be split and
3091/// rewritten as needed.
3092bool SROA::runOnAlloca(AllocaInst &AI) {
3093 DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
3094 ++NumAllocasAnalyzed;
3095
3096 // Special case dead allocas, as they're trivial.
3097 if (AI.use_empty()) {
3098 AI.eraseFromParent();
3099 return true;
3100 }
3101
3102 // Skip alloca forms that this analysis can't handle.
3103 if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
3104 TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
3105 return false;
3106
3107 // First check if this is a non-aggregate type that we should simply promote.
3108 if (!AI.getAllocatedType()->isAggregateType() && isAllocaPromotable(&AI)) {
3109 DEBUG(dbgs() << " Trivially scalar type, queuing for promotion...\n");
3110 PromotableAllocas.push_back(&AI);
3111 return false;
3112 }
3113
Chandler Carruthc370acd2012-09-18 12:57:43 +00003114 bool Changed = false;
3115
3116 // First, split any FCA loads and stores touching this alloca to expose
3117 // better splitting and promotion opportunities.
3118 AggLoadStoreRewriter AggRewriter(*TD);
3119 Changed |= AggRewriter.rewrite(AI);
3120
Chandler Carruth713aa942012-09-14 09:22:59 +00003121 // Build the partition set using a recursive instruction-visiting builder.
3122 AllocaPartitioning P(*TD, AI);
3123 DEBUG(P.print(dbgs()));
3124 if (P.isEscaped())
Chandler Carruthc370acd2012-09-18 12:57:43 +00003125 return Changed;
Chandler Carruth713aa942012-09-14 09:22:59 +00003126
3127 // No partitions to split. Leave the dead alloca for a later pass to clean up.
3128 if (P.begin() == P.end())
Chandler Carruthc370acd2012-09-18 12:57:43 +00003129 return Changed;
Chandler Carruth713aa942012-09-14 09:22:59 +00003130
3131 // Delete all the dead users of this alloca before splitting and rewriting it.
Chandler Carruth713aa942012-09-14 09:22:59 +00003132 for (AllocaPartitioning::dead_user_iterator DI = P.dead_user_begin(),
3133 DE = P.dead_user_end();
3134 DI != DE; ++DI) {
3135 Changed = true;
3136 (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
3137 DeadInsts.push_back(*DI);
3138 }
3139 for (AllocaPartitioning::dead_op_iterator DO = P.dead_op_begin(),
3140 DE = P.dead_op_end();
3141 DO != DE; ++DO) {
3142 Value *OldV = **DO;
3143 // Clobber the use with an undef value.
3144 **DO = UndefValue::get(OldV->getType());
3145 if (Instruction *OldI = dyn_cast<Instruction>(OldV))
3146 if (isInstructionTriviallyDead(OldI)) {
3147 Changed = true;
3148 DeadInsts.push_back(OldI);
3149 }
3150 }
3151
3152 return splitAlloca(AI, P) || Changed;
3153}
3154
Chandler Carruth8615cd22012-09-14 10:26:38 +00003155/// \brief Delete the dead instructions accumulated in this run.
3156///
3157/// Recursively deletes the dead instructions we've accumulated. This is done
3158/// at the very end to maximize locality of the recursive delete and to
3159/// minimize the problems of invalidated instruction pointers as such pointers
3160/// are used heavily in the intermediate stages of the algorithm.
3161///
3162/// We also record the alloca instructions deleted here so that they aren't
3163/// subsequently handed to mem2reg to promote.
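///
/// For example (illustrative), deleting a dead store may leave the bitcast
/// feeding its pointer operand with no remaining users, in which case the
/// bitcast is queued and deleted as well.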
3164void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
Chandler Carruth713aa942012-09-14 09:22:59 +00003165 DeadSplitInsts.clear();
3166 while (!DeadInsts.empty()) {
3167 Instruction *I = DeadInsts.pop_back_val();
3168 DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
3169
3170 for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
3171 if (Instruction *U = dyn_cast<Instruction>(*OI)) {
3172 // Zero out the operand and see if it becomes trivially dead.
3173 *OI = 0;
3174 if (isInstructionTriviallyDead(U))
3175 DeadInsts.push_back(U);
3176 }
3177
3178 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
3179 DeletedAllocas.insert(AI);
3180
3181 ++NumDeleted;
3182 I->eraseFromParent();
3183 }
3184}
3185
Chandler Carruth1c8db502012-09-15 11:43:14 +00003186/// \brief Promote the allocas, using the best available technique.
3187///
3188/// This attempts to promote whatever allocas have been identified as viable in
3189/// the PromotableAllocas list. If that list is empty, there is nothing to do.
3190/// If there is a domtree available, we attempt to promote using the full power
3191/// of mem2reg. Otherwise, we build and use the AllocaPromoter above, which is
3192/// based on the SSAUpdater utilities. This function returns whether any
3193/// promotion occurred.
3194bool SROA::promoteAllocas(Function &F) {
3195 if (PromotableAllocas.empty())
3196 return false;
3197
3198 NumPromoted += PromotableAllocas.size();
3199
3200 if (DT && !ForceSSAUpdater) {
3201 DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
3202 PromoteMemToReg(PromotableAllocas, *DT);
3203 PromotableAllocas.clear();
3204 return true;
3205 }
3206
3207 DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
3208 SSAUpdater SSA;
3209 DIBuilder DIB(*F.getParent());
3210 SmallVector<Instruction*, 64> Insts;
3211
3212 for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
3213 AllocaInst *AI = PromotableAllocas[Idx];
3214 for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
3215 UI != UE;) {
3216 Instruction *I = cast<Instruction>(*UI++);
3217 // FIXME: Currently the SSAUpdater infrastructure doesn't reason about
3218 // lifetime intrinsics and so we strip them (and the bitcasts+GEPs
3219 // leading to them) here. Eventually it should use them to optimize the
3220 // scalar values produced.
3221 if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
3222 assert(onlyUsedByLifetimeMarkers(I) &&
3223 "Found a bitcast used outside of a lifetime marker.");
3224 while (!I->use_empty())
3225 cast<Instruction>(*I->use_begin())->eraseFromParent();
3226 I->eraseFromParent();
3227 continue;
3228 }
3229 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3230 assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
3231 II->getIntrinsicID() == Intrinsic::lifetime_end);
3232 II->eraseFromParent();
3233 continue;
3234 }
3235
3236 Insts.push_back(I);
3237 }
3238 AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
3239 Insts.clear();
3240 }
3241
3242 PromotableAllocas.clear();
3243 return true;
3244}
3245
Chandler Carruth713aa942012-09-14 09:22:59 +00003246namespace {
3247 /// \brief A predicate to test whether an alloca belongs to a set.
3248 class IsAllocaInSet {
3249 typedef SmallPtrSet<AllocaInst *, 4> SetType;
3250 const SetType &Set;
3251
3252 public:
3253 IsAllocaInSet(const SetType &Set) : Set(Set) {}
3254 bool operator()(AllocaInst *AI) { return Set.count(AI); }
3255 };
3256}
3257
3258bool SROA::runOnFunction(Function &F) {
3259 DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
3260 C = &F.getContext();
3261 TD = getAnalysisIfAvailable<TargetData>();
3262 if (!TD) {
3263 DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
3264 return false;
3265 }
Chandler Carruth1c8db502012-09-15 11:43:14 +00003266 DT = getAnalysisIfAvailable<DominatorTree>();
Chandler Carruth713aa942012-09-14 09:22:59 +00003267
3268 BasicBlock &EntryBB = F.getEntryBlock();
3269 for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
3270 I != E; ++I)
3271 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
3272 Worklist.insert(AI);
3273
3274 bool Changed = false;
Chandler Carruth8615cd22012-09-14 10:26:38 +00003275 // A set of deleted alloca instruction pointers which should be removed from
3276 // the list of promotable allocas.
3277 SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
3278
Chandler Carruth713aa942012-09-14 09:22:59 +00003279 while (!Worklist.empty()) {
3280 Changed |= runOnAlloca(*Worklist.pop_back_val());
Chandler Carruth8615cd22012-09-14 10:26:38 +00003281 deleteDeadInstructions(DeletedAllocas);
Chandler Carruth713aa942012-09-14 09:22:59 +00003282 if (!DeletedAllocas.empty()) {
3283 PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
3284 PromotableAllocas.end(),
3285 IsAllocaInSet(DeletedAllocas)),
3286 PromotableAllocas.end());
3287 DeletedAllocas.clear();
3288 }
3289 }
3290
Chandler Carruth1c8db502012-09-15 11:43:14 +00003291 Changed |= promoteAllocas(F);
Chandler Carruth713aa942012-09-14 09:22:59 +00003292
3293 return Changed;
3294}
3295
3296void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
Chandler Carruth1c8db502012-09-15 11:43:14 +00003297 if (RequiresDomTree)
3298 AU.addRequired<DominatorTree>();
Chandler Carruth713aa942012-09-14 09:22:59 +00003299 AU.setPreservesCFG();
3300}