//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well-known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It attempts to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sroa"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/DIBuilder.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");

/// Hidden option to force the pass to not use DomTree and mem2reg, instead
/// forming SSA values through the SSAUpdater infrastructure.
static cl::opt<bool>
ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);

namespace {
/// \brief A custom IRBuilder inserter which prefixes all names if they are
/// preserved.
template <bool preserveNames = true>
class IRBuilderPrefixedInserter :
    public IRBuilderDefaultInserter<preserveNames> {
  std::string Prefix;

public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }

protected:
  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter<preserveNames>::InsertHelper(
        I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt);
  }
};

// Specialization for not preserving the name is trivial.
template <>
class IRBuilderPrefixedInserter<false> :
    public IRBuilderDefaultInserter<false> {
public:
  void SetNamePrefix(const Twine &P) {}
};

/// \brief Provide a typedef for IRBuilder that drops names in release builds.
#ifndef NDEBUG
typedef llvm::IRBuilder<true, ConstantFolder,
                        IRBuilderPrefixedInserter<true> > IRBuilderTy;
#else
typedef llvm::IRBuilder<false, ConstantFolder,
                        IRBuilderPrefixedInserter<false> > IRBuilderTy;
#endif
}

namespace {
/// \brief A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
  /// \brief The beginning offset of the range.
  uint64_t BeginOffset;

  /// \brief The ending offset, not included in the range.
  uint64_t EndOffset;

  /// \brief Storage for both the use of this slice and whether it can be
  /// split.
  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;

public:
  Slice() : BeginOffset(), EndOffset() {}
  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
      : BeginOffset(BeginOffset), EndOffset(EndOffset),
        UseAndIsSplittable(U, IsSplittable) {}

  uint64_t beginOffset() const { return BeginOffset; }
  uint64_t endOffset() const { return EndOffset; }

  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }

  Use *getUse() const { return UseAndIsSplittable.getPointer(); }

  bool isDead() const { return getUse() == 0; }
  void kill() { UseAndIsSplittable.setPointer(0); }

  /// \brief Support for ordering ranges.
  ///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
  /// decreasing. Thus the spanning range comes first in a cluster with the
  /// same start position.
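  ///
  /// For example, among splittable slices starting at offset 0, [0,16)
  /// orders before [0,8), and with identical offsets an unsplittable slice
  /// orders before a splittable one.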
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset()) return true;
    if (beginOffset() > RHS.beginOffset()) return false;
    if (isSplittable() != RHS.isSplittable()) return !isSplittable();
    if (endOffset() > RHS.endOffset()) return true;
    return false;
  }

  /// \brief Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }

  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};
} // end anonymous namespace

namespace llvm {
template <typename T> struct isPodLike;
template <> struct isPodLike<Slice> {
  static const bool value = true;
};
}

namespace {
/// \brief Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class AllocaSlices {
public:
  /// \brief Construct the slices of a particular alloca.
  AllocaSlices(const DataLayout &DL, AllocaInst &AI);

  /// \brief Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the slices are never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// \brief Support for iterating over the slices.
  /// @{
  typedef SmallVectorImpl<Slice>::iterator iterator;
  iterator begin() { return Slices.begin(); }
  iterator end() { return Slices.end(); }

  typedef SmallVectorImpl<Slice>::const_iterator const_iterator;
  const_iterator begin() const { return Slices.begin(); }
  const_iterator end() const { return Slices.end(); }
  /// @}

  /// \brief Allow iterating the dead users for this alloca.
  ///
  /// These are instructions which will never actually use the alloca as they
  /// are outside the allocated range. They are safe to replace with undef and
  /// delete.
  /// @{
  typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
  dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
  dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
  /// @}

  /// \brief Allow iterating the dead expressions referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  /// @{
  typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
  dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
  dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
  /// @}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;
  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// \brief Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// \brief The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// \brief The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// \brief Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with undef as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// \brief Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere) and neither is the PHI,
  /// but we want to swap this particular input for undef to simplify the use
  /// lists of the alloca.
  SmallVector<Use *, 8> DeadOperands;
};
}

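/// \brief Try to fold a select instruction to one of its operands.
///
/// Returns the operand to which the select statically resolves if the
/// condition is constant or both operands are identical, and null otherwise.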
static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes, this does (rarely) happen
  // early on.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1+CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return 0;
}

/// \brief Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
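///
/// As an illustrative sketch, for an alloca used like:
///   %a = alloca { i32, i32 }
///   %f1 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 1
///   %v = load i32* %f1
/// the builder records a single unsplittable slice [4,8) whose use is the
/// load.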
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
  friend class PtrUseVisitor<SliceBuilder>;
  friend class InstVisitor<SliceBuilder>;
  typedef PtrUseVisitor<SliceBuilder> Base;

  const uint64_t AllocSize;
  AllocaSlices &S;

  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;

  /// \brief Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &S)
      : PtrUseVisitor<SliceBuilder>(DL),
        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), S(S) {}

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I))
      S.DeadUsers.push_back(&I);
  }

  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or start either before or
    // past the end of the allocation.
    if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) {
      DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
                   << " which has zero size or starts outside of the "
                   << AllocSize << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << I << "\n");
      return markAsDead(I);
    }

    uint64_t BeginOffset = Offset.getZExtValue();
    uint64_t EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    // This may appear superficially to be something we could ignore entirely,
    // but that is not so! There may be widened loads or PHI-node uses where
    // some instructions are dead but not others. We can't completely ignore
    // them, and so have to record at least the information here.
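    // For instance, an 8-byte use at offset 12 of a 16-byte alloca is
    // recorded as the clamped slice [12,16).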
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
                   << " to remain within the " << AllocSize << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

    S.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
  }

  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    return Base::visitBitCastInst(BC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    return Base::visitGetElementPtrInst(GEPI);
  }

  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
                         uint64_t Size, bool IsVolatile) {
    // We allow splitting of loads and stores where the type is an integer
    // type and they cover the entire alloca. This prevents us from splitting
    // over-eagerly.
    // FIXME: Eventually, we should eagerly split all integer loads and
    // stores, and then have a separate step that merges adjacent alloca
    // partitions into a single partition suitable for integer widening. Or we
    // should skip the merge step and rely on GVN and other passes to merge
    // adjacent loads and stores that survive mem2reg.
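    // For example, an i64 load covering all of an 8-byte alloca is
    // splittable, while an i32 load of either half of it is not.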
    bool IsSplittable =
        Ty->isIntegerTy() && !IsVolatile && Offset == 0 && Size >= AllocSize;

    insertUse(I, Offset, Size, IsSplittable);
  }

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    uint64_t Size = DL.getTypeStoreSize(LI.getType());
    return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
  }

  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    uint64_t Size = DL.getTypeStoreSize(ValOp->getType());

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Offset.isNegative() || Size > AllocSize ||
        Offset.ugt(AllocSize - Size)) {
      DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
                   << " which extends past the end of the " << AllocSize
                   << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }

  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    insertUse(II, Offset,
              Length ? Length->getLimitedValue()
                     : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if (Length && Length->getValue() == 0)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    // Because we can visit these intrinsics twice, also check to see if the
    // first time marked this instruction as dead. If so, skip it.
    if (VisitedDeadInsts.count(&II))
      return;

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // This side of the transfer is completely out-of-bounds, and so we can
    // nuke the entire transfer. However, we also need to nuke the other side
    // if already added to our partitions.
    // FIXME: Yet another place we really should bypass this when
    // instrumenting for ASan.
    if (!Offset.isNegative() && Offset.uge(AllocSize)) {
      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
          MemTransferSliceMap.find(&II);
      if (MTPI != MemTransferSliceMap.end())
        S.Slices[MTPI->second].kill();
      return markAsDead(II);
    }

    uint64_t RawOffset = Offset.getLimitedValue();
    uint64_t Size = Length ? Length->getLimitedValue()
                           : AllocSize - RawOffset;

    // Check for the special case where the same exact value is used for both
    // source and dest.
    if (*U == II.getRawDest() && *U == II.getRawSource()) {
      // For non-volatile transfers this is a no-op.
      if (!II.isVolatile())
        return markAsDead(II);

      return insertUse(II, Offset, Size, /*IsSplittable=*/false);
    }

    // If we have seen both source and destination for a mem transfer, then
    // they both point to the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
    llvm::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, S.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
      Slice &PrevP = S.Slices[PrevIdx];

      // Check if the begin offsets match and this is a non-volatile transfer.
      // In that case, we can completely elide the transfer.
      if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
        PrevP.kill();
        return markAsDead(II);
      }

      // Otherwise we have an offset transfer within the same alloca. We can't
      // split those.
      PrevP.makeUnsplittable();
    }

    // Insert the use now that we've fixed up the splittable nature.
    insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);

    // Check that we ended up with a valid index in the map.
    assert(S.Slices[PrevIdx].getUse()->getUser() == &II &&
           "Map index doesn't point back to a slice with this user.");
  }

  // Disable SROA for any intrinsics except for lifetime invariants.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
        II.getIntrinsicID() == Intrinsic::lifetime_end) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                               Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return;
    }

    Base::visitIntrinsicInst(II);
  }

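  /// \brief Walk the transitive users of a PHI or select, looking for a use
  /// which makes it unsafe to treat as a simple slice.
  ///
  /// Returns the offending instruction, or null if every use is a safe load
  /// or store. \p Size is set to the maximum size loaded or stored through
  /// \p Root.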
  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
    // We consider any PHI or select that results in a direct load or store of
    // the same offset to be a viable use for slicing purposes. These uses
    // are considered unsplittable and the size is the maximum loaded or stored
    // size.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
    Visited.insert(Root);
    Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
    // If there are no loads or stores, the access is dead. We mark that as
    // a size zero access.
    Size = 0;
    do {
      Instruction *I, *UsedI;
      llvm::tie(UsedI, I) = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        Value *Op = SI->getOperand(0);
        if (Op == UsedI)
          return SI;
        Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        if (!GEP->hasAllZeroIndices())
          return GEP;
      } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
                 !isa<SelectInst>(I)) {
        return I;
      }

      for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
           ++UI)
        if (Visited.insert(cast<Instruction>(*UI)))
          Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
    } while (!Uses.empty());

    return 0;
  }

  void visitPHINode(PHINode &PN) {
    if (PN.use_empty())
      return markAsDead(PN);
    if (!IsOffsetKnown)
      return PI.setAborted(&PN);

    // See if we already have computed info on this node.
    uint64_t &PHISize = PHIOrSelectSizes[&PN];
    if (!PHISize) {
      // This is a new PHI node, check for an unsafe use of the PHI node.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHISize))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if ((Offset.isNegative() && (-Offset).uge(PHISize)) ||
        (!Offset.isNegative() && Offset.uge(AllocSize))) {
      S.DeadOperands.push_back(U);
      return;
    }

    insertUse(PN, Offset, PHISize);
  }

  void visitSelectInst(SelectInst &SI) {
    if (SI.use_empty())
      return markAsDead(SI);
    if (Value *Result = foldSelectInst(SI)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the select as if we had RAUW'ed it.
        enqueueUsers(SI);
      else
        // Otherwise the operand to the select is dead, and we can replace it
        // with undef.
        S.DeadOperands.push_back(U);

      return;
    }
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    // See if we already have computed info on this node.
    uint64_t &SelectSize = PHIOrSelectSizes[&SI];
    if (!SelectSize) {
      // This is a new Select, check for an unsafe use of it.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectSize))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if ((Offset.isNegative() && Offset.uge(SelectSize)) ||
        (!Offset.isNegative() && Offset.uge(AllocSize))) {
      S.DeadOperands.push_back(U);
      return;
    }

    insertUse(SI, Offset, SelectSize);
  }

  /// \brief Disable SROA entirely if there are unhandled users of the alloca.
  void visitInstruction(Instruction &I) {
    PI.setAborted(&I);
  }
};

AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      AI(AI),
#endif
      PointerEscapingInstr(0) {
  SliceBuilder PB(DL, AI, *this);
  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
  if (PtrI.isEscaped() || PtrI.isAborted()) {
    // FIXME: We should sink the escape vs. abort info into the caller nicely,
    // possibly by just storing the PtrInfo in the AllocaSlices.
    PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
                                                  : PtrI.getAbortingInst();
    assert(PointerEscapingInstr && "Did not track a bad instruction");
    return;
  }

  Slices.erase(std::remove_if(Slices.begin(), Slices.end(),
                              std::mem_fun_ref(&Slice::isDead)),
               Slices.end());

  // Sort the uses. This arranges for the offsets to be in ascending order,
  // and the sizes to be in descending order.
  std::sort(Slices.begin(), Slices.end());
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "") << "\n";
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
  print(dbgs(), I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

namespace {
/// \brief Implementation of LoadAndStorePromoter for promoting allocas.
///
/// This subclass of LoadAndStorePromoter adds overrides to handle promoting
/// the loads and stores of an alloca instruction, as well as updating its
/// debug information. This is used when a domtree is unavailable and thus
/// mem2reg in its full form can't be used to handle promotion of allocas to
/// scalar values.
class AllocaPromoter : public LoadAndStorePromoter {
  AllocaInst &AI;
  DIBuilder &DIB;

  SmallVector<DbgDeclareInst *, 4> DDIs;
  SmallVector<DbgValueInst *, 4> DVIs;

public:
  AllocaPromoter(const SmallVectorImpl<Instruction *> &Insts, SSAUpdater &S,
                 AllocaInst &AI, DIBuilder &DIB)
      : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}

  void run(const SmallVectorImpl<Instruction*> &Insts) {
    // Retain the debug information attached to the alloca for use when
    // rewriting loads and stores.
    if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
      for (Value::use_iterator UI = DebugNode->use_begin(),
                               UE = DebugNode->use_end();
           UI != UE; ++UI)
        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
          DDIs.push_back(DDI);
        else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
          DVIs.push_back(DVI);
    }

    LoadAndStorePromoter::run(Insts);

    // While we have the debug information, clear it off of the alloca. The
    // caller takes care of deleting the alloca.
    while (!DDIs.empty())
      DDIs.pop_back_val()->eraseFromParent();
    while (!DVIs.empty())
      DVIs.pop_back_val()->eraseFromParent();
  }

  virtual bool isInstInList(Instruction *I,
                            const SmallVectorImpl<Instruction*> &Insts) const {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();

    // Only used to detect cycles, which will be rare and quickly found as
    // we're walking up a chain of defs rather than down through uses.
    SmallPtrSet<Value *, 4> Visited;

    do {
      if (Ptr == &AI)
        return true;

      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Ptr))
        Ptr = BCI->getOperand(0);
      else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
        Ptr = GEPI->getPointerOperand();
      else
        return false;

    } while (Visited.insert(Ptr));

    return false;
  }

  virtual void updateDebugInfo(Instruction *Inst) const {
    for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
           E = DDIs.end(); I != E; ++I) {
      DbgDeclareInst *DDI = *I;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
      else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
    }
    for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
           E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
      Value *Arg = 0;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        // If an argument is zero extended then use the argument directly. The
        // ZExt may be zapped by an optimization pass in the future.
        if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(ZExt->getOperand(0));
        else if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(SExt->getOperand(0));
        if (!Arg)
          Arg = SI->getValueOperand();
      } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        Arg = LI->getPointerOperand();
      } else {
        continue;
      }
      Instruction *DbgVal =
          DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
                                      Inst);
      DbgVal->setDebugLoc(DVI->getDebugLoc());
    }
  }
};
} // end anon namespace

namespace {
/// \brief An optimization pass providing Scalar Replacement of Aggregates.
///
/// This pass takes allocations which can be completely analyzed (that is, they
/// don't escape) and tries to turn them into scalar SSA values. There are
/// a few steps to this process.
///
/// 1) It takes allocations of aggregates and analyzes the ways in which they
///    are used to try to split them into smaller allocations, ideally of
///    a single scalar data type. It will split up memcpy and memset accesses
///    as necessary and try to isolate individual scalar accesses.
/// 2) It will transform accesses into forms which are suitable for SSA value
///    promotion. This can be replacing a memset with a scalar store of an
///    integer value, or it can involve speculating operations on a PHI or
///    select to be a PHI or select of the results.
/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
///    onto insert and extract operations on a vector value, and convert them to
///    this form. By doing so, it will enable promotion of vector aggregates to
///    SSA vector values.
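///
/// As a sketch of the overall effect, an alloca such as
///   %xy = alloca { i32, i32 }
/// whose two fields are only ever stored to and reloaded individually is
/// split into two scalar allocas, which mem2reg then promotes to SSA values,
/// leaving no memory operations behind.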
class SROA : public FunctionPass {
  const bool RequiresDomTree;

  LLVMContext *C;
  const DataLayout *DL;
  DominatorTree *DT;

  /// \brief Worklist of alloca instructions to simplify.
  ///
  /// Each alloca in the function is added to this. Each new alloca formed gets
  /// added to it as well to recursively simplify unless that alloca can be
877 /// the one being actively rewritten, we add it back onto the list if not
878 /// already present to ensure it is re-visited.
879 SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > Worklist;
880
881 /// \brief A collection of instructions to delete.
882 /// We try to batch deletions to simplify code and make things a bit more
883 /// efficient.
Chandler Carruth18db7952012-11-20 01:12:50 +0000884 SetVector<Instruction *, SmallVector<Instruction *, 8> > DeadInsts;
Chandler Carruth1b398ae2012-09-14 09:22:59 +0000885
Chandler Carruthac8317f2012-10-04 12:33:50 +0000886 /// \brief Post-promotion worklist.
887 ///
888 /// Sometimes we discover an alloca which has a high probability of becoming
889 /// viable for SROA after a round of promotion takes place. In those cases,
890 /// the alloca is enqueued here for re-processing.
891 ///
892 /// Note that we have to be very careful to clear allocas out of this list in
893 /// the event they are deleted.
894 SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > PostPromotionWorklist;
895
Chandler Carruth1b398ae2012-09-14 09:22:59 +0000896 /// \brief A collection of alloca instructions we can directly promote.
897 std::vector<AllocaInst *> PromotableAllocas;
898
Chandler Carruthf0546402013-07-18 07:15:00 +0000899 /// \brief A worklist of PHIs to speculate prior to promoting allocas.
900 ///
901 /// All of these PHIs have been checked for the safety of speculation and by
902 /// being speculated will allow promoting allocas currently in the promotable
903 /// queue.
904 SetVector<PHINode *, SmallVector<PHINode *, 2> > SpeculatablePHIs;
905
906 /// \brief A worklist of select instructions to speculate prior to promoting
907 /// allocas.
908 ///
909 /// All of these select instructions have been checked for the safety of
910 /// speculation and by being speculated will allow promoting allocas
911 /// currently in the promotable queue.
912 SetVector<SelectInst *, SmallVector<SelectInst *, 2> > SpeculatableSelects;
913
Chandler Carruth1b398ae2012-09-14 09:22:59 +0000914public:
Chandler Carruth70b44c52012-09-15 11:43:14 +0000915 SROA(bool RequiresDomTree = true)
916 : FunctionPass(ID), RequiresDomTree(RequiresDomTree),
Chandler Carruth90a735d2013-07-19 07:21:28 +0000917 C(0), DL(0), DT(0) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +0000918 initializeSROAPass(*PassRegistry::getPassRegistry());
919 }
920 bool runOnFunction(Function &F);
921 void getAnalysisUsage(AnalysisUsage &AU) const;
922
923 const char *getPassName() const { return "SROA"; }
924 static char ID;
925
926private:
Chandler Carruth82a57542012-10-01 10:54:05 +0000927 friend class PHIOrSelectSpeculator;
Chandler Carruth9f21fe12013-07-19 09:13:58 +0000928 friend class AllocaSliceRewriter;
Chandler Carruth1b398ae2012-09-14 09:22:59 +0000929
Chandler Carruth9f21fe12013-07-19 09:13:58 +0000930 bool rewritePartition(AllocaInst &AI, AllocaSlices &S,
931 AllocaSlices::iterator B, AllocaSlices::iterator E,
932 int64_t BeginOffset, int64_t EndOffset,
933 ArrayRef<AllocaSlices::iterator> SplitUses);
934 bool splitAlloca(AllocaInst &AI, AllocaSlices &S);
Chandler Carruth1b398ae2012-09-14 09:22:59 +0000935 bool runOnAlloca(AllocaInst &AI);
Chandler Carruth1bf38c62014-01-19 12:16:54 +0000936 void clobberUse(Use &U);
Chandler Carruth19450da2012-09-14 10:26:38 +0000937 void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
Chandler Carruth70b44c52012-09-15 11:43:14 +0000938 bool promoteAllocas(Function &F);
Chandler Carruth1b398ae2012-09-14 09:22:59 +0000939};
940}
941
942char SROA::ID = 0;
943
Chandler Carruth70b44c52012-09-15 11:43:14 +0000944FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
945 return new SROA(RequiresDomTree);
Chandler Carruth1b398ae2012-09-14 09:22:59 +0000946}
947
948INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
949 false, false)
Chandler Carruth73523022014-01-13 13:07:17 +0000950INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
Chandler Carruth1b398ae2012-09-14 09:22:59 +0000951INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
952 false, false)
953
Chandler Carruth9f21fe12013-07-19 09:13:58 +0000954/// Walk the range of a partitioning looking for a common type to cover this
955/// sequence of slices.
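///
/// For example, if every slice spanning the partition loads or stores i64,
/// i64 is the common type. If one such slice uses float and another i32,
/// there is no common type, and the widest byte-width integer type seen
/// (here i32) is returned instead.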
956static Type *findCommonType(AllocaSlices::const_iterator B,
957 AllocaSlices::const_iterator E,
Chandler Carruthf0546402013-07-18 07:15:00 +0000958 uint64_t EndOffset) {
959 Type *Ty = 0;
Chandler Carruth4de31542014-01-21 23:16:05 +0000960 bool TyIsCommon = true;
961 IntegerType *ITy = 0;
962
963 // Note that we need to look at *every* alloca slice's Use to ensure we
964 // always get consistent results regardless of the order of slices.
Chandler Carruth9f21fe12013-07-19 09:13:58 +0000965 for (AllocaSlices::const_iterator I = B; I != E; ++I) {
Chandler Carruthf0546402013-07-18 07:15:00 +0000966 Use *U = I->getUse();
967 if (isa<IntrinsicInst>(*U->getUser()))
968 continue;
969 if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
970 continue;
Chandler Carruth90c4a3a2012-10-05 01:29:06 +0000971
Chandler Carruthf0546402013-07-18 07:15:00 +0000972 Type *UserTy = 0;
Chandler Carrutha1262002013-11-19 09:03:18 +0000973 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
Chandler Carruthf0546402013-07-18 07:15:00 +0000974 UserTy = LI->getType();
Chandler Carrutha1262002013-11-19 09:03:18 +0000975 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
Chandler Carruthf0546402013-07-18 07:15:00 +0000976 UserTy = SI->getValueOperand()->getType();
Chandler Carrutha1262002013-11-19 09:03:18 +0000977 }
Chandler Carruth90c4a3a2012-10-05 01:29:06 +0000978
Chandler Carruth4de31542014-01-21 23:16:05 +0000979 if (!UserTy || (Ty && Ty != UserTy))
980 TyIsCommon = false; // Give up on anything but an iN type.
981 else
982 Ty = UserTy;
983
984 if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
Chandler Carruthf0546402013-07-18 07:15:00 +0000985 // If the type is larger than the partition, skip it. We only encounter
Chandler Carruth9f21fe12013-07-19 09:13:58 +0000986 // this for split integer operations where we want to use the type of the
Chandler Carrutha1262002013-11-19 09:03:18 +0000987 // entity causing the split. Also skip if the type is not a byte width
988 // multiple.
Chandler Carruth4de31542014-01-21 23:16:05 +0000989 if (UserITy->getBitWidth() % 8 != 0 ||
990 UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
Chandler Carruthf0546402013-07-18 07:15:00 +0000991 continue;
Chandler Carruth90c4a3a2012-10-05 01:29:06 +0000992
Chandler Carruth4de31542014-01-21 23:16:05 +0000993 // Track the largest bitwidth integer type used in this way in case there
994 // is no common type.
995 if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
996 ITy = UserITy;
Chandler Carruthe3899f22013-07-15 17:36:21 +0000997 }
998 }
Chandler Carruth4de31542014-01-21 23:16:05 +0000999
1000 return TyIsCommon ? Ty : ITy;
Chandler Carruthf0546402013-07-18 07:15:00 +00001001}
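// A minimal standalone sketch (not part of this pass, and simplified --
// the fits-in-partition check is omitted) of the precedence rule above,
// written over hypothetical (IsInt, Bits) records rather than real slices:
// a load/store type shared by every covering use wins; otherwise we fall
// back to the widest byte-multiple integer type seen, if any.
struct DemoUseTy { bool IsInt; unsigned Bits; }; // hypothetical record
static const DemoUseTy *findDemoCommonTy(const DemoUseTy *B,
                                         const DemoUseTy *E) {
  const DemoUseTy *Common = 0, *WidestInt = 0;
  bool IsCommon = true;
  for (const DemoUseTy *I = B; I != E; ++I) {
    if (Common && (I->IsInt != Common->IsInt || I->Bits != Common->Bits))
      IsCommon = false; // Mixed use types: give up on a common type.
    else
      Common = I;
    if (I->IsInt && I->Bits % 8 == 0 &&
        (!WidestInt || WidestInt->Bits < I->Bits))
      WidestInt = I; // Track the widest iN fallback.
  }
  return IsCommon ? Common : WidestInt;
}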
Chandler Carruthe3899f22013-07-15 17:36:21 +00001002
Chandler Carruthf0546402013-07-18 07:15:00 +00001003/// PHI instructions that use an alloca and are subsequently loaded can be
1004/// rewritten to load both input pointers in the pred blocks and then PHI the
1005/// results, allowing the load of the alloca to be promoted.
1006/// From this:
1007/// %P2 = phi [i32* %Alloca, i32* %Other]
1008/// %V = load i32* %P2
1009/// to:
1010/// %V1 = load i32* %Alloca -> will be mem2reg'd
1011/// ...
1012/// %V2 = load i32* %Other
1013/// ...
1014/// %V = phi [i32 %V1, i32 %V2]
1015///
1016/// We can do this to a select if its only uses are loads and if the operands
1017/// to the select can be loaded unconditionally.
1018///
1019/// FIXME: This should be hoisted into a generic utility, likely in
1020/// Transforms/Util/Local.h
1021static bool isSafePHIToSpeculate(PHINode &PN,
Chandler Carruth90a735d2013-07-19 07:21:28 +00001022 const DataLayout *DL = 0) {
Chandler Carruthf0546402013-07-18 07:15:00 +00001023 // For now, we can only do this promotion if the load is in the same block
1024 // as the PHI, and if there are no stores between the phi and load.
1025 // TODO: Allow recursive phi users.
1026 // TODO: Allow stores.
1027 BasicBlock *BB = PN.getParent();
1028 unsigned MaxAlign = 0;
1029 bool HaveLoad = false;
1030 for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); UI != UE;
1031 ++UI) {
1032 LoadInst *LI = dyn_cast<LoadInst>(*UI);
1033 if (LI == 0 || !LI->isSimple())
Chandler Carruthe74ff4c2013-07-15 10:30:19 +00001034 return false;
Chandler Carruthe74ff4c2013-07-15 10:30:19 +00001035
Chandler Carruthf0546402013-07-18 07:15:00 +00001036 // For now we only allow loads in the same block as the PHI. This is
1037 // a common case that happens when instcombine merges two loads through
1038 // a PHI.
1039 if (LI->getParent() != BB)
1040 return false;
Chandler Carruthe3899f22013-07-15 17:36:21 +00001041
Chandler Carruthf0546402013-07-18 07:15:00 +00001042 // Ensure that there are no instructions between the PHI and the load that
1043 // could store.
1044 for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
1045 if (BBI->mayWriteToMemory())
Chandler Carruthe3899f22013-07-15 17:36:21 +00001046 return false;
Chandler Carruthe3899f22013-07-15 17:36:21 +00001047
Chandler Carruthf0546402013-07-18 07:15:00 +00001048 MaxAlign = std::max(MaxAlign, LI->getAlignment());
1049 HaveLoad = true;
Chandler Carruthe3899f22013-07-15 17:36:21 +00001050 }
1051
Chandler Carruthf0546402013-07-18 07:15:00 +00001052 if (!HaveLoad)
1053 return false;
Chandler Carruthe3899f22013-07-15 17:36:21 +00001054
Chandler Carruthf0546402013-07-18 07:15:00 +00001055 // We can only transform this if it is safe to push the loads into the
1056 // predecessor blocks. The only thing to watch out for is that we can't put
1057 // a possibly trapping load in the predecessor if it is a critical edge.
1058 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1059 TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
1060 Value *InVal = PN.getIncomingValue(Idx);
Chandler Carruthe3899f22013-07-15 17:36:21 +00001061
Chandler Carruthf0546402013-07-18 07:15:00 +00001062 // If the value is produced by the terminator of the predecessor (an
1063 // invoke) or it has side-effects, there is no valid place to put a load
1064 // in the predecessor.
1065 if (TI == InVal || TI->mayHaveSideEffects())
1066 return false;
Chandler Carruthe3899f22013-07-15 17:36:21 +00001067
Chandler Carruthf0546402013-07-18 07:15:00 +00001068 // If the predecessor has a single successor, then the edge isn't
1069 // critical.
1070 if (TI->getNumSuccessors() == 1)
1071 continue;
Chandler Carruthe3899f22013-07-15 17:36:21 +00001072
Chandler Carruthf0546402013-07-18 07:15:00 +00001073 // If this pointer is always safe to load, or if we can prove that there
1074 // is already a load in the block, then we can move the load to the pred
1075 // block.
1076 if (InVal->isDereferenceablePointer() ||
Chandler Carruth90a735d2013-07-19 07:21:28 +00001077 isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL))
Chandler Carruthf0546402013-07-18 07:15:00 +00001078 continue;
1079
1080 return false;
1081 }
1082
1083 return true;
1084}
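// For example (illustrative IR, mirroring the checks above), speculation
// must be rejected when a predecessor ends in a multi-successor branch and
// the incoming pointer is not known dereferenceable:
//
//   pred:                                       ; two successors =>
//     br i1 %c, label %use_phi, label %other    ; the edge is critical
//   use_phi:
//     %P = phi i32* [ %Ptr, %pred ], ...
//     %V = load i32* %P
//
// Hoisting "load i32* %Ptr" into %pred would also execute it when %c is
// false, so it is only safe if %Ptr is known dereferenceable (allocas
// qualify) or a load of %Ptr is already known to be safe.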
1085
1086static void speculatePHINodeLoads(PHINode &PN) {
1087 DEBUG(dbgs() << " original: " << PN << "\n");
1088
1089 Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
1090 IRBuilderTy PHIBuilder(&PN);
1091 PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
1092 PN.getName() + ".sroa.speculated");
1093
1094 // Get the TBAA tag and alignment to use from one of the loads. It doesn't
1095 // matter which one we get and if any differ.
1096 LoadInst *SomeLoad = cast<LoadInst>(*PN.use_begin());
1097 MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
1098 unsigned Align = SomeLoad->getAlignment();
1099
1100 // Rewrite all loads of the PN to use the new PHI.
1101 while (!PN.use_empty()) {
1102 LoadInst *LI = cast<LoadInst>(*PN.use_begin());
1103 LI->replaceAllUsesWith(NewPN);
1104 LI->eraseFromParent();
1105 }
1106
1107 // Inject loads into all of the pred blocks.
1108 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1109 BasicBlock *Pred = PN.getIncomingBlock(Idx);
1110 TerminatorInst *TI = Pred->getTerminator();
1111 Value *InVal = PN.getIncomingValue(Idx);
1112 IRBuilderTy PredBuilder(TI);
1113
1114 LoadInst *Load = PredBuilder.CreateLoad(
1115 InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
1116 ++NumLoadsSpeculated;
1117 Load->setAlignment(Align);
1118 if (TBAATag)
1119 Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
1120 NewPN->addIncoming(Load, Pred);
1121 }
1122
1123 DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
1124 PN.eraseFromParent();
1125}
1126
1127/// Select instructions that use an alloca and are subsequently loaded can be
1128/// rewritten to load both input pointers and then select between the result,
1129/// allowing the load of the alloca to be promoted.
1130/// From this:
1131/// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
1132/// %V = load i32* %P2
1133/// to:
1134/// %V1 = load i32* %Alloca -> will be mem2reg'd
1135/// %V2 = load i32* %Other
1136/// %V = select i1 %cond, i32 %V1, i32 %V2
1137///
1138/// We can do this to a select if its only uses are loads and if the operands
1139/// to the select can be loaded unconditionally.
Chandler Carruth90a735d2013-07-19 07:21:28 +00001140static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *DL = 0) {
Chandler Carruthf0546402013-07-18 07:15:00 +00001141 Value *TValue = SI.getTrueValue();
1142 Value *FValue = SI.getFalseValue();
1143 bool TDerefable = TValue->isDereferenceablePointer();
1144 bool FDerefable = FValue->isDereferenceablePointer();
1145
1146 for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); UI != UE;
1147 ++UI) {
1148 LoadInst *LI = dyn_cast<LoadInst>(*UI);
1149 if (LI == 0 || !LI->isSimple())
1150 return false;
1151
1152 // Both operands to the select need to be dereferenceable, either
1153 // absolutely (e.g. allocas) or at this point because we can see other
1154 // accesses to it.
1155 if (!TDerefable &&
Chandler Carruth90a735d2013-07-19 07:21:28 +00001156 !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment(), DL))
Chandler Carruthf0546402013-07-18 07:15:00 +00001157 return false;
1158 if (!FDerefable &&
Chandler Carruth90a735d2013-07-19 07:21:28 +00001159 !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment(), DL))
Chandler Carruthf0546402013-07-18 07:15:00 +00001160 return false;
1161 }
1162
1163 return true;
1164}
1165
1166static void speculateSelectInstLoads(SelectInst &SI) {
1167 DEBUG(dbgs() << " original: " << SI << "\n");
1168
1169 IRBuilderTy IRB(&SI);
1170 Value *TV = SI.getTrueValue();
1171 Value *FV = SI.getFalseValue();
1172 // Replace the loads of the select with a select of two loads.
1173 while (!SI.use_empty()) {
1174 LoadInst *LI = cast<LoadInst>(*SI.use_begin());
1175 assert(LI->isSimple() && "We only speculate simple loads");
1176
1177 IRB.SetInsertPoint(LI);
1178 LoadInst *TL =
Chandler Carruthe3899f22013-07-15 17:36:21 +00001179 IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
Chandler Carruthf0546402013-07-18 07:15:00 +00001180 LoadInst *FL =
Chandler Carruthe3899f22013-07-15 17:36:21 +00001181 IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
Chandler Carruthf0546402013-07-18 07:15:00 +00001182 NumLoadsSpeculated += 2;
Chandler Carruthe3899f22013-07-15 17:36:21 +00001183
Chandler Carruthf0546402013-07-18 07:15:00 +00001184 // Transfer alignment and TBAA info if present.
1185 TL->setAlignment(LI->getAlignment());
1186 FL->setAlignment(LI->getAlignment());
1187 if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
1188 TL->setMetadata(LLVMContext::MD_tbaa, Tag);
1189 FL->setMetadata(LLVMContext::MD_tbaa, Tag);
Chandler Carruthe3899f22013-07-15 17:36:21 +00001190 }
Chandler Carruthf0546402013-07-18 07:15:00 +00001191
1192 Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
1193 LI->getName() + ".sroa.speculated");
1194
1195 DEBUG(dbgs() << " speculated to: " << *V << "\n");
1196 LI->replaceAllUsesWith(V);
1197 LI->eraseFromParent();
Chandler Carruthe3899f22013-07-15 17:36:21 +00001198 }
Chandler Carruthf0546402013-07-18 07:15:00 +00001199 SI.eraseFromParent();
Chandler Carruth90c4a3a2012-10-05 01:29:06 +00001200}
1201
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001202/// \brief Build a GEP out of a base pointer and indices.
1203///
1204/// This will return the BasePtr if that is valid, or build a new GEP
1205/// instruction using the IRBuilder if GEP-ing is needed.
Chandler Carruthd177f862013-03-20 07:30:36 +00001206static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001207 SmallVectorImpl<Value *> &Indices) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001208 if (Indices.empty())
1209 return BasePtr;
1210
1211 // A single zero index is a no-op, so check for this and avoid building a GEP
1212 // in that case.
1213 if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
1214 return BasePtr;
1215
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001216 return IRB.CreateInBoundsGEP(BasePtr, Indices, "idx");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001217}
1218
1219/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
1220/// TargetTy without changing the offset of the pointer.
1221///
1222/// This routine assumes we've already established a properly offset GEP with
1223/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
1224/// zero-indices down through type layers until we find one the same as
1225/// TargetTy. If we can't find one with the same type, we at least try to use
1226/// one with the same size. If none of that works, we just produce the GEP as
1227/// indicated by Indices to have the correct offset.
Chandler Carruth90a735d2013-07-19 07:21:28 +00001228static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001229 Value *BasePtr, Type *Ty, Type *TargetTy,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001230 SmallVectorImpl<Value *> &Indices) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001231 if (Ty == TargetTy)
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001232 return buildGEP(IRB, BasePtr, Indices);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001233
1234 // See if we can descend into a struct and locate a field with the correct
1235 // type.
1236 unsigned NumLayers = 0;
1237 Type *ElementTy = Ty;
1238 do {
1239 if (ElementTy->isPointerTy())
1240 break;
1241 if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
1242 ElementTy = SeqTy->getElementType();
Chandler Carruth40617f52012-10-17 07:22:16 +00001243 // Note that we use the default address space as this index is over an
1244 // array or a vector, not a pointer.
Chandler Carruth90a735d2013-07-19 07:21:28 +00001245 Indices.push_back(IRB.getInt(APInt(DL.getPointerSizeInBits(0), 0)));
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001246 } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
Chandler Carruth503eb2b2012-10-09 01:58:35 +00001247 if (STy->element_begin() == STy->element_end())
1248 break; // Nothing left to descend into.
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001249 ElementTy = *STy->element_begin();
1250 Indices.push_back(IRB.getInt32(0));
1251 } else {
1252 break;
1253 }
1254 ++NumLayers;
1255 } while (ElementTy != TargetTy);
1256 if (ElementTy != TargetTy)
1257 Indices.erase(Indices.end() - NumLayers, Indices.end());
1258
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001259 return buildGEP(IRB, BasePtr, Indices);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001260}
1261
1262/// \brief Recursively compute indices for a natural GEP.
1263///
1264/// This is the recursive step for getNaturalGEPWithOffset that walks down the
1265/// element types adding appropriate indices for the GEP.
Chandler Carruth90a735d2013-07-19 07:21:28 +00001266static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001267 Value *Ptr, Type *Ty, APInt &Offset,
1268 Type *TargetTy,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001269 SmallVectorImpl<Value *> &Indices) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001270 if (Offset == 0)
Chandler Carruth90a735d2013-07-19 07:21:28 +00001271 return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001272
1273 // We can't recurse through pointer types.
1274 if (Ty->isPointerTy())
1275 return 0;
1276
Chandler Carruthdd3cea82012-09-14 10:30:40 +00001277 // We try to analyze GEPs over vectors here, but note that these GEPs are
1278 // extremely poorly defined currently. The long-term goal is to remove GEPing
1279 // over a vector from the IR completely.
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001280 if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
Chandler Carruth90a735d2013-07-19 07:21:28 +00001281 unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001282 if (ElementSizeInBits % 8)
Chandler Carruthdd3cea82012-09-14 10:30:40 +00001283 return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001284 APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
Chandler Carruth6fab42a2012-10-17 09:23:48 +00001285 APInt NumSkippedElements = Offset.sdiv(ElementSize);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001286 if (NumSkippedElements.ugt(VecTy->getNumElements()))
1287 return 0;
1288 Offset -= NumSkippedElements * ElementSize;
1289 Indices.push_back(IRB.getInt(NumSkippedElements));
Chandler Carruth90a735d2013-07-19 07:21:28 +00001290 return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001291 Offset, TargetTy, Indices);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001292 }
1293
1294 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
1295 Type *ElementTy = ArrTy->getElementType();
Chandler Carruth90a735d2013-07-19 07:21:28 +00001296 APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
Chandler Carruth6fab42a2012-10-17 09:23:48 +00001297 APInt NumSkippedElements = Offset.sdiv(ElementSize);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001298 if (NumSkippedElements.ugt(ArrTy->getNumElements()))
1299 return 0;
1300
1301 Offset -= NumSkippedElements * ElementSize;
1302 Indices.push_back(IRB.getInt(NumSkippedElements));
Chandler Carruth90a735d2013-07-19 07:21:28 +00001303 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001304 Indices);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001305 }
1306
1307 StructType *STy = dyn_cast<StructType>(Ty);
1308 if (!STy)
1309 return 0;
1310
Chandler Carruth90a735d2013-07-19 07:21:28 +00001311 const StructLayout *SL = DL.getStructLayout(STy);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001312 uint64_t StructOffset = Offset.getZExtValue();
Chandler Carruthcabd96c2012-09-14 10:30:42 +00001313 if (StructOffset >= SL->getSizeInBytes())
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001314 return 0;
1315 unsigned Index = SL->getElementContainingOffset(StructOffset);
1316 Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
1317 Type *ElementTy = STy->getElementType(Index);
Chandler Carruth90a735d2013-07-19 07:21:28 +00001318 if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001319 return 0; // The offset points into alignment padding.
1320
1321 Indices.push_back(IRB.getInt32(Index));
Chandler Carruth90a735d2013-07-19 07:21:28 +00001322 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001323 Indices);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001324}
1325
1326/// \brief Get a natural GEP from a base pointer to a particular offset and
1327/// resulting in a particular type.
1328///
1329/// The goal is to produce a "natural" looking GEP that works with the existing
1330/// composite types to arrive at the appropriate offset and element type for
1331/// a pointer. TargetTy is the element type the returned GEP should point-to if
1332/// possible. We recurse by decreasing Offset, adding the appropriate index to
1333/// Indices, and setting Ty to the result subtype.
1334///
Chandler Carruth93a21e72012-09-14 10:18:49 +00001335/// If no natural GEP can be constructed, this function returns null.
Chandler Carruth90a735d2013-07-19 07:21:28 +00001336static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001337 Value *Ptr, APInt Offset, Type *TargetTy,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001338 SmallVectorImpl<Value *> &Indices) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001339 PointerType *Ty = cast<PointerType>(Ptr->getType());
1340
1341 // Don't consider any GEPs through an i8* as natural unless the TargetTy is
1342 // an i8.
1343 if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
1344 return 0;
1345
1346 Type *ElementTy = Ty->getElementType();
Chandler Carruth3f882d42012-09-18 22:37:19 +00001347 if (!ElementTy->isSized())
1348 return 0; // We can't GEP through an unsized element.
Chandler Carruth90a735d2013-07-19 07:21:28 +00001349 APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001350 if (ElementSize == 0)
1351 return 0; // Zero-length arrays can't help us build a natural GEP.
Chandler Carruth6fab42a2012-10-17 09:23:48 +00001352 APInt NumSkippedElements = Offset.sdiv(ElementSize);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001353
1354 Offset -= NumSkippedElements * ElementSize;
1355 Indices.push_back(IRB.getInt(NumSkippedElements));
Chandler Carruth90a735d2013-07-19 07:21:28 +00001356 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001357 Indices);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001358}
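// A worked example (illustrative only): for a base pointer of type
// {i32, [4 x i8]}* and a byte offset of 6, the routines above peel the
// offset through the type layers -- struct element 1 (the array, at byte
// offset 4) and then array index 2 -- producing the "natural" GEP
//
//   %p6 = getelementptr inbounds {i32, [4 x i8]}* %base, i64 0, i32 1, i64 2
//
// rather than a raw i8* GEP of 6 bytes followed by a bitcast.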
1359
1360/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
1361/// resulting pointer has PointerTy.
1362///
1363/// This tries very hard to compute a "natural" GEP which arrives at the offset
1364/// and produces the pointer type desired. Where it cannot, it will try to use
1365/// the natural GEP to arrive at the offset and bitcast to the type. Where that
1366/// fails, it will try to use an existing i8* and GEP to the byte offset and
1367/// bitcast to the type.
1368///
1369/// The strategy for finding the more natural GEPs is to peel off layers of the
1370/// pointer, walking back through bit casts and GEPs, searching for a base
1371/// pointer from which we can compute a natural GEP with the desired
Jakub Staszak086f6cd2013-02-19 22:02:21 +00001372/// properties. The algorithm tries to fold as many constant indices into
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001373/// a single GEP as possible, thus making each GEP more independent of the
1374/// surrounding code.
Chandler Carruth90a735d2013-07-19 07:21:28 +00001375static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001376 Value *Ptr, APInt Offset, Type *PointerTy) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001377 // Even though we don't look through PHI nodes, we could be called on an
1378 // instruction in an unreachable block, which may be on a cycle.
1379 SmallPtrSet<Value *, 4> Visited;
1380 Visited.insert(Ptr);
1381 SmallVector<Value *, 4> Indices;
1382
1383 // We may end up computing an offset pointer that has the wrong type. If we
1384 // never are able to compute one directly that has the correct type, we'll
1385 // fall back to it, so keep it around here.
1386 Value *OffsetPtr = 0;
1387
1388 // Remember any i8 pointer we come across to re-use if we need to do a raw
1389 // byte offset.
1390 Value *Int8Ptr = 0;
1391 APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1392
1393 Type *TargetTy = PointerTy->getPointerElementType();
1394
1395 do {
1396 // First fold any existing GEPs into the offset.
1397 while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1398 APInt GEPOffset(Offset.getBitWidth(), 0);
Chandler Carruth90a735d2013-07-19 07:21:28 +00001399 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001400 break;
1401 Offset += GEPOffset;
1402 Ptr = GEP->getPointerOperand();
1403 if (!Visited.insert(Ptr))
1404 break;
1405 }
1406
1407 // See if we can perform a natural GEP here.
1408 Indices.clear();
Chandler Carruth90a735d2013-07-19 07:21:28 +00001409 if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001410 Indices)) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001411 if (P->getType() == PointerTy) {
1412 // Zap any offset pointer that we ended up computing in previous rounds.
1413 if (OffsetPtr && OffsetPtr->use_empty())
1414 if (Instruction *I = dyn_cast<Instruction>(OffsetPtr))
1415 I->eraseFromParent();
1416 return P;
1417 }
1418 if (!OffsetPtr) {
1419 OffsetPtr = P;
1420 }
1421 }
1422
1423 // Stash this pointer if we've found an i8*.
1424 if (Ptr->getType()->isIntegerTy(8)) {
1425 Int8Ptr = Ptr;
1426 Int8PtrOffset = Offset;
1427 }
1428
1429 // Peel off a layer of the pointer and update the offset appropriately.
1430 if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1431 Ptr = cast<Operator>(Ptr)->getOperand(0);
1432 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1433 if (GA->mayBeOverridden())
1434 break;
1435 Ptr = GA->getAliasee();
1436 } else {
1437 break;
1438 }
1439 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1440 } while (Visited.insert(Ptr));
1441
1442 if (!OffsetPtr) {
1443 if (!Int8Ptr) {
1444 Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001445 "raw_cast");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001446 Int8PtrOffset = Offset;
1447 }
1448
1449 OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
1450 IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001451 "raw_idx");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001452 }
1453 Ptr = OffsetPtr;
1454
1455 // On the off chance we were targeting i8*, guard the bitcast here.
1456 if (Ptr->getType() != PointerTy)
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001457 Ptr = IRB.CreateBitCast(Ptr, PointerTy, "cast");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001458
1459 return Ptr;
1460}
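// When no natural GEP reaches the target type, the fallback path above
// degrades gracefully to byte arithmetic (illustrative shape only):
//
//   %raw_cast = bitcast %T* %ptr to i8*          ; only if no i8* was seen
//   %raw_idx  = getelementptr inbounds i8* %raw_cast, i64 <Offset>
//   %cast     = bitcast i8* %raw_idx to <PointerTy>
//
// reusing any i8* already encountered while walking the pointer if one
// was stashed above.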
1461
Chandler Carruthaa6afbb2012-10-15 08:40:22 +00001462/// \brief Test whether we can convert a value from the old to the new type.
1463///
1464/// This predicate should be used to guard calls to convertValue in order to
1465/// ensure that we only try to convert viable values. The strategy is that we
1466/// will peel off single element struct and array wrappings to get to an
1467/// underlying value, and convert that value.
1468static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
1469 if (OldTy == NewTy)
1470 return true;
Chandler Carrutha1c54bb2013-03-14 11:32:24 +00001471 if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
1472 if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
1473 if (NewITy->getBitWidth() >= OldITy->getBitWidth())
1474 return true;
Chandler Carruthaa6afbb2012-10-15 08:40:22 +00001475 if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
1476 return false;
1477 if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
1478 return false;
1479
Benjamin Kramer56262592013-09-22 11:24:58 +00001480 // We can convert pointers to integers and vice-versa. Same for vectors
Benjamin Kramer90901a32013-09-21 20:36:04 +00001481 // of pointers and integers.
1482 OldTy = OldTy->getScalarType();
1483 NewTy = NewTy->getScalarType();
Chandler Carruthaa6afbb2012-10-15 08:40:22 +00001484 if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
1485 if (NewTy->isPointerTy() && OldTy->isPointerTy())
1486 return true;
1487 if (NewTy->isIntegerTy() || OldTy->isIntegerTy())
1488 return true;
1489 return false;
1490 }
1491
1492 return true;
1493}
1494
1495/// \brief Generic routine to convert an SSA value to a value of a different
1496/// type.
1497///
1498/// This will try various different casting techniques, such as bitcasts,
1499/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
1500/// two types for viability with this routine.
Chandler Carruthd177f862013-03-20 07:30:36 +00001501static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
Benjamin Kramer90901a32013-09-21 20:36:04 +00001502 Type *NewTy) {
1503 Type *OldTy = V->getType();
1504 assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");
1505
1506 if (OldTy == NewTy)
Chandler Carruthaa6afbb2012-10-15 08:40:22 +00001507 return V;
Benjamin Kramer90901a32013-09-21 20:36:04 +00001508
1509 if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
1510 if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
Chandler Carrutha1c54bb2013-03-14 11:32:24 +00001511 if (NewITy->getBitWidth() > OldITy->getBitWidth())
1512 return IRB.CreateZExt(V, NewITy);
Chandler Carruthaa6afbb2012-10-15 08:40:22 +00001513
Benjamin Kramer90901a32013-09-21 20:36:04 +00001514 // See if we need inttoptr for this type pair. A cast involving both scalars
1515 // and vectors requires an additional bitcast.
1516 if (OldTy->getScalarType()->isIntegerTy() &&
1517 NewTy->getScalarType()->isPointerTy()) {
1518 // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1519 if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1520 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1521 NewTy);
1522
1523 // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1524 if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1525 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1526 NewTy);
1527
1528 return IRB.CreateIntToPtr(V, NewTy);
1529 }
1530
1531 // See if we need ptrtoint for this type pair. A cast involving both scalars
1532 // and vectors requires an additional bitcast.
1533 if (OldTy->getScalarType()->isPointerTy() &&
1534 NewTy->getScalarType()->isIntegerTy()) {
1535 // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1536 if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1537 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1538 NewTy);
1539
1540 // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1541 if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1542 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1543 NewTy);
1544
1545 return IRB.CreatePtrToInt(V, NewTy);
1546 }
1547
1548 return IRB.CreateBitCast(V, NewTy);
Chandler Carruthaa6afbb2012-10-15 08:40:22 +00001549}
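// For example (illustrative only), converting between a vector of integers
// and a scalar pointer takes the two-step path above, because one bitcast
// cannot change both the element kind and the scalar/vector shape:
//
//   %i = bitcast <2 x i32> %v to i64    ; vector -> integer of equal size
//   %p = inttoptr i64 %i to i8*         ; integer -> pointer
//
// assuming a 64-bit DataLayout, where getIntPtrType(i8*) is i64.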
1550
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001551/// \brief Test whether the given slice use can be promoted to a vector.
Chandler Carruthf0546402013-07-18 07:15:00 +00001552///
1553/// This function is called to test each entry in a partioning which is slated
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001554/// for a single slice.
1555static bool isVectorPromotionViableForSlice(
1556 const DataLayout &DL, AllocaSlices &S, uint64_t SliceBeginOffset,
1557 uint64_t SliceEndOffset, VectorType *Ty, uint64_t ElementSize,
1558 AllocaSlices::const_iterator I) {
1559 // First validate the slice offsets.
Chandler Carruthf0546402013-07-18 07:15:00 +00001560 uint64_t BeginOffset =
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001561 std::max(I->beginOffset(), SliceBeginOffset) - SliceBeginOffset;
Chandler Carruthf0546402013-07-18 07:15:00 +00001562 uint64_t BeginIndex = BeginOffset / ElementSize;
1563 if (BeginIndex * ElementSize != BeginOffset ||
1564 BeginIndex >= Ty->getNumElements())
1565 return false;
1566 uint64_t EndOffset =
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001567 std::min(I->endOffset(), SliceEndOffset) - SliceBeginOffset;
Chandler Carruthf0546402013-07-18 07:15:00 +00001568 uint64_t EndIndex = EndOffset / ElementSize;
1569 if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
1570 return false;
1571
1572 assert(EndIndex > BeginIndex && "Empty vector!");
1573 uint64_t NumElements = EndIndex - BeginIndex;
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001574 Type *SliceTy =
Chandler Carruthf0546402013-07-18 07:15:00 +00001575 (NumElements == 1) ? Ty->getElementType()
1576 : VectorType::get(Ty->getElementType(), NumElements);
1577
1578 Type *SplitIntTy =
1579 Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
1580
1581 Use *U = I->getUse();
1582
1583 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
1584 if (MI->isVolatile())
1585 return false;
1586 if (!I->isSplittable())
1587 return false; // Skip any unsplittable intrinsics.
1588 } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
1589 // Disable vector promotion when there are loads or stores of an FCA.
1590 return false;
1591 } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1592 if (LI->isVolatile())
1593 return false;
1594 Type *LTy = LI->getType();
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001595 if (SliceBeginOffset > I->beginOffset() ||
1596 SliceEndOffset < I->endOffset()) {
Chandler Carruthf0546402013-07-18 07:15:00 +00001597 assert(LTy->isIntegerTy());
1598 LTy = SplitIntTy;
1599 }
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001600 if (!canConvertValue(DL, SliceTy, LTy))
Chandler Carruthf0546402013-07-18 07:15:00 +00001601 return false;
1602 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1603 if (SI->isVolatile())
1604 return false;
1605 Type *STy = SI->getValueOperand()->getType();
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001606 if (SliceBeginOffset > I->beginOffset() ||
1607 SliceEndOffset < I->endOffset()) {
Chandler Carruthf0546402013-07-18 07:15:00 +00001608 assert(STy->isIntegerTy());
1609 STy = SplitIntTy;
1610 }
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001611 if (!canConvertValue(DL, STy, SliceTy))
Chandler Carruthf0546402013-07-18 07:15:00 +00001612 return false;
Chandler Carruth1ed848d2013-07-19 10:57:32 +00001613 } else {
1614 return false;
Chandler Carruthf0546402013-07-18 07:15:00 +00001615 }
1616
1617 return true;
1618}
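// The offset arithmetic above, on hypothetical numbers: with a <4 x i32>
// alloca, ElementSize is 4 bytes, so a slice covering bytes [4,12) maps to
// BeginIndex 1 and EndIndex 3 and is rewritten as a <2 x i32> subvector
// access. A slice covering bytes [2,6) fails the
// "BeginIndex * ElementSize == BeginOffset" test and blocks promotion.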
1619
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001620/// \brief Test whether the given alloca partitioning and range of slices can be
1621/// promoted to a vector.
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001622///
1623/// This is a quick test to check whether we can rewrite a particular alloca
1624/// partition (and its newly formed alloca) into a vector alloca with only
1625/// whole-vector loads and stores such that it could be promoted to a vector
1626/// SSA value. We only can ensure this for a limited set of operations, and we
1627/// don't want to do the rewrites unless we are confident that the result will
1628/// be promotable, so we have an early test here.
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001629static bool
1630isVectorPromotionViable(const DataLayout &DL, Type *AllocaTy, AllocaSlices &S,
1631 uint64_t SliceBeginOffset, uint64_t SliceEndOffset,
1632 AllocaSlices::const_iterator I,
1633 AllocaSlices::const_iterator E,
1634 ArrayRef<AllocaSlices::iterator> SplitUses) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001635 VectorType *Ty = dyn_cast<VectorType>(AllocaTy);
1636 if (!Ty)
1637 return false;
1638
Chandler Carruth90a735d2013-07-19 07:21:28 +00001639 uint64_t ElementSize = DL.getTypeSizeInBits(Ty->getScalarType());
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001640
1641 // While the definition of LLVM vectors is bitpacked, we don't support sizes
1642 // that aren't byte sized.
1643 if (ElementSize % 8)
1644 return false;
Chandler Carruth90a735d2013-07-19 07:21:28 +00001645 assert((DL.getTypeSizeInBits(Ty) % 8) == 0 &&
Benjamin Kramerc003a452013-01-01 16:13:35 +00001646 "vector size not a multiple of element size?");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001647 ElementSize /= 8;
1648
Chandler Carruthf0546402013-07-18 07:15:00 +00001649 for (; I != E; ++I)
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001650 if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset,
1651 SliceEndOffset, Ty, ElementSize, I))
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001652 return false;
1653
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001654 for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
1655 SUE = SplitUses.end();
Chandler Carruthf0546402013-07-18 07:15:00 +00001656 SUI != SUE; ++SUI)
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001657 if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset,
1658 SliceEndOffset, Ty, ElementSize, *SUI))
Chandler Carruthe3899f22013-07-15 17:36:21 +00001659 return false;
Chandler Carruthf0546402013-07-18 07:15:00 +00001660
1661 return true;
1662}
1663
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001664/// \brief Test whether a slice of an alloca is valid for integer widening.
Chandler Carruthf0546402013-07-18 07:15:00 +00001665///
1666/// This implements the necessary checking for the \c isIntegerWideningViable
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001667/// test below on a single slice of the alloca.
1668static bool isIntegerWideningViableForSlice(const DataLayout &DL,
1669 Type *AllocaTy,
1670 uint64_t AllocBeginOffset,
1671 uint64_t Size, AllocaSlices &S,
1672 AllocaSlices::const_iterator I,
1673 bool &WholeAllocaOp) {
Chandler Carruthf0546402013-07-18 07:15:00 +00001674 uint64_t RelBegin = I->beginOffset() - AllocBeginOffset;
1675 uint64_t RelEnd = I->endOffset() - AllocBeginOffset;
1676
1677 // We can't reasonably handle cases where the load or store extends past
1678 // the end of the alloca's type and into its padding.
1679 if (RelEnd > Size)
1680 return false;
1681
1682 Use *U = I->getUse();
1683
1684 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1685 if (LI->isVolatile())
1686 return false;
1687 if (RelBegin == 0 && RelEnd == Size)
1688 WholeAllocaOp = true;
1689 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
Chandler Carruth90a735d2013-07-19 07:21:28 +00001690 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
Chandler Carruthe3899f22013-07-15 17:36:21 +00001691 return false;
Chandler Carruthf0546402013-07-18 07:15:00 +00001692 } else if (RelBegin != 0 || RelEnd != Size ||
Chandler Carruth90a735d2013-07-19 07:21:28 +00001693 !canConvertValue(DL, AllocaTy, LI->getType())) {
Chandler Carruthf0546402013-07-18 07:15:00 +00001694 // Non-integer loads need to be convertible from the alloca type so that
1695 // they are promotable.
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001696 return false;
1697 }
Chandler Carruthf0546402013-07-18 07:15:00 +00001698 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1699 Type *ValueTy = SI->getValueOperand()->getType();
1700 if (SI->isVolatile())
1701 return false;
1702 if (RelBegin == 0 && RelEnd == Size)
1703 WholeAllocaOp = true;
1704 if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
Chandler Carruth90a735d2013-07-19 07:21:28 +00001705 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
Chandler Carruthf0546402013-07-18 07:15:00 +00001706 return false;
1707 } else if (RelBegin != 0 || RelEnd != Size ||
Chandler Carruth90a735d2013-07-19 07:21:28 +00001708 !canConvertValue(DL, ValueTy, AllocaTy)) {
Chandler Carruthf0546402013-07-18 07:15:00 +00001709 // Non-integer stores need to be convertible to the alloca type so that
1710 // they are promotable.
1711 return false;
1712 }
1713 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
1714 if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
1715 return false;
1716 if (!I->isSplittable())
1717 return false; // Skip any unsplittable intrinsics.
1718 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
1719 if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
1720 II->getIntrinsicID() != Intrinsic::lifetime_end)
1721 return false;
1722 } else {
1723 return false;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001724 }
Chandler Carruthf0546402013-07-18 07:15:00 +00001725
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001726 return true;
1727}
1728
Chandler Carruth435c4e02012-10-15 08:40:30 +00001729/// \brief Test whether the given alloca partition's integer operations can be
1730/// widened to promotable ones.
Chandler Carruth92924fd2012-09-24 00:34:20 +00001731///
Chandler Carruth435c4e02012-10-15 08:40:30 +00001732/// This is a quick test to check whether we can rewrite the integer loads and
1733/// stores to a particular alloca into wider loads and stores and be able to
1734/// promote the resulting alloca.
Chandler Carruthf0546402013-07-18 07:15:00 +00001735static bool
Chandler Carruth90a735d2013-07-19 07:21:28 +00001736isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy,
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001737 uint64_t AllocBeginOffset, AllocaSlices &S,
1738 AllocaSlices::const_iterator I,
1739 AllocaSlices::const_iterator E,
1740 ArrayRef<AllocaSlices::iterator> SplitUses) {
Chandler Carruth90a735d2013-07-19 07:21:28 +00001741 uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
Benjamin Kramer47534c72012-12-01 11:53:32 +00001742 // Don't create integer types larger than the maximum bitwidth.
1743 if (SizeInBits > IntegerType::MAX_INT_BITS)
1744 return false;
Chandler Carruth435c4e02012-10-15 08:40:30 +00001745
1746 // Don't try to handle allocas with bit-padding.
Chandler Carruth90a735d2013-07-19 07:21:28 +00001747 if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
Chandler Carruth92924fd2012-09-24 00:34:20 +00001748 return false;
1749
Chandler Carruth58d05562012-10-25 04:37:07 +00001750 // We need to ensure that an integer type with the appropriate bitwidth can
1751 // be converted to the alloca type, whatever that is. We don't want to force
1752 // the alloca itself to have an integer type if there is a more suitable one.
1753 Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
Chandler Carruth90a735d2013-07-19 07:21:28 +00001754 if (!canConvertValue(DL, AllocaTy, IntTy) ||
1755 !canConvertValue(DL, IntTy, AllocaTy))
Chandler Carruth58d05562012-10-25 04:37:07 +00001756 return false;
1757
Chandler Carruth90a735d2013-07-19 07:21:28 +00001758 uint64_t Size = DL.getTypeStoreSize(AllocaTy);
Chandler Carruth435c4e02012-10-15 08:40:30 +00001759
Chandler Carruthf0546402013-07-18 07:15:00 +00001760 // While examining uses, we ensure that the alloca has a covering load or
1761 // store. We don't want to widen the integer operations only to fail to
1762 // promote due to some other unsplittable entry (which we may make splittable
Chandler Carruth5955c9e2013-07-19 07:12:23 +00001763 // later). However, if there are only splittable uses, go ahead and assume
1764 // that we cover the alloca.
Chandler Carruth90a735d2013-07-19 07:21:28 +00001765 bool WholeAllocaOp = (I != E) ? false : DL.isLegalInteger(SizeInBits);
Chandler Carruth43c8b462012-10-04 10:39:28 +00001766
Chandler Carruthf0546402013-07-18 07:15:00 +00001767 for (; I != E; ++I)
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001768 if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
1769 S, I, WholeAllocaOp))
Chandler Carruth43c8b462012-10-04 10:39:28 +00001770 return false;
1771
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001772 for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
1773 SUE = SplitUses.end();
Chandler Carruthf0546402013-07-18 07:15:00 +00001774 SUI != SUE; ++SUI)
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001775 if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
1776 S, *SUI, WholeAllocaOp))
Chandler Carruth92924fd2012-09-24 00:34:20 +00001777 return false;
Chandler Carruthf0546402013-07-18 07:15:00 +00001778
Chandler Carruth92924fd2012-09-24 00:34:20 +00001779 return WholeAllocaOp;
1780}
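// A hypothetical example of what passes this test: an i64 alloca written
// by one whole i64 store but read back as two i32 halves at byte offsets
// 0 and 4. The covering store satisfies the WholeAllocaOp requirement, and
// rewriting then turns each i32 load into a shift-and-trunc of a single
// wide i64 load (see extractInteger below), leaving a promotable integer
// alloca.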
1781
Chandler Carruthd177f862013-03-20 07:30:36 +00001782static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001783 IntegerType *Ty, uint64_t Offset,
1784 const Twine &Name) {
Chandler Carruth18db7952012-11-20 01:12:50 +00001785 DEBUG(dbgs() << " start: " << *V << "\n");
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001786 IntegerType *IntTy = cast<IntegerType>(V->getType());
1787 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
1788 "Element extends past full value");
1789 uint64_t ShAmt = 8*Offset;
1790 if (DL.isBigEndian())
1791 ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
Chandler Carruth18db7952012-11-20 01:12:50 +00001792 if (ShAmt) {
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001793 V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
Chandler Carruth18db7952012-11-20 01:12:50 +00001794 DEBUG(dbgs() << " shifted: " << *V << "\n");
1795 }
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001796 assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
1797 "Cannot extract to a larger integer!");
Chandler Carruth18db7952012-11-20 01:12:50 +00001798 if (Ty != IntTy) {
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001799 V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
Chandler Carruth18db7952012-11-20 01:12:50 +00001800 DEBUG(dbgs() << " trunced: " << *V << "\n");
1801 }
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001802 return V;
1803}
1804
Chandler Carruthd177f862013-03-20 07:30:36 +00001805static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001806 Value *V, uint64_t Offset, const Twine &Name) {
1807 IntegerType *IntTy = cast<IntegerType>(Old->getType());
1808 IntegerType *Ty = cast<IntegerType>(V->getType());
1809 assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
1810 "Cannot insert a larger integer!");
Chandler Carruth18db7952012-11-20 01:12:50 +00001811 DEBUG(dbgs() << " start: " << *V << "\n");
1812 if (Ty != IntTy) {
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001813 V = IRB.CreateZExt(V, IntTy, Name + ".ext");
Chandler Carruth18db7952012-11-20 01:12:50 +00001814 DEBUG(dbgs() << " extended: " << *V << "\n");
1815 }
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001816 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
1817 "Element store outside of alloca store");
1818 uint64_t ShAmt = 8*Offset;
1819 if (DL.isBigEndian())
1820 ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
Chandler Carruth18db7952012-11-20 01:12:50 +00001821 if (ShAmt) {
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001822 V = IRB.CreateShl(V, ShAmt, Name + ".shift");
Chandler Carruth18db7952012-11-20 01:12:50 +00001823 DEBUG(dbgs() << " shifted: " << *V << "\n");
1824 }
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001825
1826 if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
1827 APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
1828 Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
Chandler Carruth18db7952012-11-20 01:12:50 +00001829 DEBUG(dbgs() << " masked: " << *Old << "\n");
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001830 V = IRB.CreateOr(Old, V, Name + ".insert");
Chandler Carruth18db7952012-11-20 01:12:50 +00001831 DEBUG(dbgs() << " inserted: " << *V << "\n");
Chandler Carruth59ff93af2012-10-18 09:56:08 +00001832 }
1833 return V;
1834}
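// Illustrative sketch only (not used by the pass): the same byte-shift
// arithmetic as extractInteger/insertInteger above, written on plain
// uint64_t values and assuming a little-endian layout. Names are
// hypothetical.
static uint64_t demoExtractInteger(uint64_t V, unsigned ByteOffset,
                                   unsigned ByteWidth) {
  uint64_t ShAmt = 8 * ByteOffset;               // Shift field down to bit 0.
  V >>= ShAmt;
  if (ByteWidth < 8)                             // Truncate to the field width.
    V &= (1ULL << (8 * ByteWidth)) - 1;
  return V;
}
static uint64_t demoInsertInteger(uint64_t Old, uint64_t V,
                                  unsigned ByteOffset, unsigned ByteWidth) {
  uint64_t ShAmt = 8 * ByteOffset;
  uint64_t Mask = (ByteWidth < 8 ? (1ULL << (8 * ByteWidth)) - 1 : ~0ULL)
                  << ShAmt;
  return (Old & ~Mask) | ((V << ShAmt) & Mask);  // Mask-and-or, as above.
}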
1835
Chandler Carruthd177f862013-03-20 07:30:36 +00001836static Value *extractVector(IRBuilderTy &IRB, Value *V,
Chandler Carruthb6bc8742012-12-17 13:07:30 +00001837 unsigned BeginIndex, unsigned EndIndex,
1838 const Twine &Name) {
1839 VectorType *VecTy = cast<VectorType>(V->getType());
1840 unsigned NumElements = EndIndex - BeginIndex;
1841 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
1842
1843 if (NumElements == VecTy->getNumElements())
1844 return V;
1845
1846 if (NumElements == 1) {
1847 V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
1848 Name + ".extract");
1849 DEBUG(dbgs() << " extract: " << *V << "\n");
1850 return V;
1851 }
1852
1853 SmallVector<Constant*, 8> Mask;
1854 Mask.reserve(NumElements);
1855 for (unsigned i = BeginIndex; i != EndIndex; ++i)
1856 Mask.push_back(IRB.getInt32(i));
1857 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
1858 ConstantVector::get(Mask),
1859 Name + ".extract");
1860 DEBUG(dbgs() << " shuffle: " << *V << "\n");
1861 return V;
1862}
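// For example (illustrative only), extracting elements [1,3) of a
// <4 x i32> produces a shufflevector selecting lanes 1 and 2:
//
//   %sub = shufflevector <4 x i32> %v, <4 x i32> undef,
//                        <2 x i32> <i32 1, i32 2>
//
// while a single-element range degenerates to a plain extractelement.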
1863
Chandler Carruthd177f862013-03-20 07:30:36 +00001864static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
Chandler Carruthce4562b2012-12-17 13:41:21 +00001865 unsigned BeginIndex, const Twine &Name) {
1866 VectorType *VecTy = cast<VectorType>(Old->getType());
1867 assert(VecTy && "Can only insert a vector into a vector");
1868
1869 VectorType *Ty = dyn_cast<VectorType>(V->getType());
1870 if (!Ty) {
1871 // Single element to insert.
1872 V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
1873 Name + ".insert");
1874 DEBUG(dbgs() << " insert: " << *V << "\n");
1875 return V;
1876 }
1877
1878 assert(Ty->getNumElements() <= VecTy->getNumElements() &&
1879 "Too many elements!");
1880 if (Ty->getNumElements() == VecTy->getNumElements()) {
1881 assert(V->getType() == VecTy && "Vector type mismatch");
1882 return V;
1883 }
1884 unsigned EndIndex = BeginIndex + Ty->getNumElements();
1885
1886 // When inserting a smaller vector into the larger to store, we first
1887 // use a shuffle vector to widen it with undef elements, and then
1888 // a second shuffle vector to select between the loaded vector and the
1889 // incoming vector.
1890 SmallVector<Constant*, 8> Mask;
1891 Mask.reserve(VecTy->getNumElements());
1892 for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
1893 if (i >= BeginIndex && i < EndIndex)
1894 Mask.push_back(IRB.getInt32(i - BeginIndex));
1895 else
1896 Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
1897 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
1898 ConstantVector::get(Mask),
1899 Name + ".expand");
Nadav Rotem1e211912013-05-01 19:53:30 +00001900 DEBUG(dbgs() << " shuffle: " << *V << "\n");
Chandler Carruthce4562b2012-12-17 13:41:21 +00001901
1902 Mask.clear();
1903 for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
Nadav Rotem1e211912013-05-01 19:53:30 +00001904 Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
1905
1906 V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + ".blend");
1907
1908 DEBUG(dbgs() << " blend: " << *V << "\n");
Chandler Carruthce4562b2012-12-17 13:41:21 +00001909 return V;
1910}
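// The two shuffles above, on hypothetical types (illustrative only):
// inserting a <2 x i32> at index 1 of a <4 x i32> first widens the small
// vector with undef lanes,
//
//   %expand = shufflevector <2 x i32> %v, <2 x i32> undef,
//                           <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
//
// and then selects lanes 1 and 2 from it, taking the rest from the old
// value:
//
//   %blend = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>,
//                   <4 x i32> %expand, <4 x i32> %old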
1911
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001912namespace {
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001913/// \brief Visitor to rewrite instructions using a particular slice of an alloca
1914/// to use a new alloca.
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001915///
1916/// Also implements the rewriting to vector-based accesses when the partition
1917/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
1918/// lives here.
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001919class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001920 // Befriend the base class so it can delegate to private visit methods.
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001921 friend class llvm::InstVisitor<AllocaSliceRewriter, bool>;
1922 typedef llvm::InstVisitor<AllocaSliceRewriter, bool> Base;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001923
Chandler Carruth90a735d2013-07-19 07:21:28 +00001924 const DataLayout &DL;
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001925 AllocaSlices &S;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001926 SROA &Pass;
1927 AllocaInst &OldAI, &NewAI;
1928 const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
Chandler Carruth891fec02012-10-13 02:41:05 +00001929 Type *NewAllocaTy;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001930
1931 // If we are rewriting an alloca partition which can be written as pure
1932 // vector operations, we stash extra information here. When VecTy is
Jakub Staszak086f6cd2013-02-19 22:02:21 +00001933 // non-null, we have some strict guarantees about the rewritten alloca:
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001934 // - The new alloca is exactly the size of the vector type here.
1935 // - The accesses all either map to the entire vector or to a single
1936 // element.
1937 // - The set of accessing instructions is only one of those handled above
1938 // in isVectorPromotionViable. Generally these are the same access kinds
1939 // which are promotable via mem2reg.
1940 VectorType *VecTy;
1941 Type *ElementTy;
1942 uint64_t ElementSize;
1943
Chandler Carruth92924fd2012-09-24 00:34:20 +00001944 // This is a convenience and flag variable that will be null unless the new
Chandler Carruth435c4e02012-10-15 08:40:30 +00001945 // alloca's integer operations should be widened to this integer type due to
1946 // passing isIntegerWideningViable above. If it is non-null, the desired
Chandler Carruth92924fd2012-09-24 00:34:20 +00001947 // integer type will be stored here for easy access during rewriting.
Chandler Carruth435c4e02012-10-15 08:40:30 +00001948 IntegerType *IntTy;
Chandler Carruth92924fd2012-09-24 00:34:20 +00001949
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001950 // The offset of the slice currently being rewritten.
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001951 uint64_t BeginOffset, EndOffset;
Chandler Carruthf0546402013-07-18 07:15:00 +00001952 bool IsSplittable;
Chandler Carrutha1c54bb2013-03-14 11:32:24 +00001953 bool IsSplit;
Chandler Carruth54e8f0b2012-10-01 01:49:22 +00001954 Use *OldUse;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001955 Instruction *OldPtr;
1956
Chandler Carruth83ea1952013-07-24 09:47:28 +00001957 // Output members carrying state about the result of visiting and rewriting
1958 // the slice of the alloca.
1959 bool IsUsedByRewrittenSpeculatableInstructions;
1960
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00001961 // Utility IR builder, whose name prefix is setup for each visited use, and
1962 // the insertion point is set to point to the user.
1963 IRBuilderTy IRB;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001964
1965public:
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001966 AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &S, SROA &Pass,
1967 AllocaInst &OldAI, AllocaInst &NewAI,
1968 uint64_t NewBeginOffset, uint64_t NewEndOffset,
1969 bool IsVectorPromotable = false,
1970 bool IsIntegerPromotable = false)
1971 : DL(DL), S(S), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
Chandler Carruthf0546402013-07-18 07:15:00 +00001972 NewAllocaBeginOffset(NewBeginOffset), NewAllocaEndOffset(NewEndOffset),
1973 NewAllocaTy(NewAI.getAllocatedType()),
1974 VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : 0),
1975 ElementTy(VecTy ? VecTy->getElementType() : 0),
Chandler Carruth90a735d2013-07-19 07:21:28 +00001976 ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
Chandler Carruthf0546402013-07-18 07:15:00 +00001977 IntTy(IsIntegerPromotable
1978 ? Type::getIntNTy(
1979 NewAI.getContext(),
Chandler Carruth90a735d2013-07-19 07:21:28 +00001980 DL.getTypeSizeInBits(NewAI.getAllocatedType()))
Chandler Carruthf0546402013-07-18 07:15:00 +00001981 : 0),
1982 BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(),
Chandler Carruth83ea1952013-07-24 09:47:28 +00001983 OldPtr(), IsUsedByRewrittenSpeculatableInstructions(false),
1984 IRB(NewAI.getContext(), ConstantFolder()) {
Chandler Carruthf0546402013-07-18 07:15:00 +00001985 if (VecTy) {
Chandler Carruth90a735d2013-07-19 07:21:28 +00001986 assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
Chandler Carruthf0546402013-07-18 07:15:00 +00001987 "Only multiple-of-8 sized vector elements are viable");
1988 ++NumVectorized;
1989 }
1990 assert((!IsVectorPromotable && !IsIntegerPromotable) ||
1991 IsVectorPromotable != IsIntegerPromotable);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001992 }
1993
Chandler Carruth9f21fe12013-07-19 09:13:58 +00001994 bool visit(AllocaSlices::const_iterator I) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00001995 bool CanSROA = true;
Chandler Carruthf0546402013-07-18 07:15:00 +00001996 BeginOffset = I->beginOffset();
1997 EndOffset = I->endOffset();
1998 IsSplittable = I->isSplittable();
1999 IsSplit =
2000 BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002001
Chandler Carruthf0546402013-07-18 07:15:00 +00002002 OldUse = I->getUse();
2003 OldPtr = cast<Instruction>(OldUse->get());
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002004
Chandler Carruthf0546402013-07-18 07:15:00 +00002005 Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
2006 IRB.SetInsertPoint(OldUserI);
2007 IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
2008 IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
2009
2010 CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
2011 if (VecTy || IntTy)
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002012 assert(CanSROA);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002013 return CanSROA;
2014 }
2015
Chandler Carruth83ea1952013-07-24 09:47:28 +00002016 /// \brief Query whether this slice is used by speculatable instructions after
2017 /// rewriting.
2018 ///
2019 /// These instructions (PHIs and Selects currently) require the alloca slice
2020 /// to run back through the rewriter. Thus, they are promotable, but not on
2021 /// this iteration. This is distinct from a slice which is unpromotable for
2022 /// some other reason, in which case we don't even want to perform the
2023 /// speculation. This can be queried at any time and reflects whether (at
2024 /// that point) a visit call has rewritten a speculatable instruction on the
2025 /// current slice.
2026 bool isUsedByRewrittenSpeculatableInstructions() const {
2027 return IsUsedByRewrittenSpeculatableInstructions;
2028 }
2029
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002030private:
Chandler Carruthf0546402013-07-18 07:15:00 +00002031 // Make sure the other visit overloads are visible.
2032 using Base::visit;
2033
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002034 // Every instruction which can end up as a user must have a rewrite rule.
2035 bool visitInstruction(Instruction &I) {
2036 DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
2037 llvm_unreachable("No rewrite rule for this instruction!");
2038 }
2039
Chandler Carruthf0546402013-07-18 07:15:00 +00002040 Value *getAdjustedAllocaPtr(IRBuilderTy &IRB, uint64_t Offset,
2041 Type *PointerTy) {
2042 assert(Offset >= NewAllocaBeginOffset);
Chandler Carruth90a735d2013-07-19 07:21:28 +00002043 return getAdjustedPtr(IRB, DL, &NewAI, APInt(DL.getPointerSizeInBits(),
Chandler Carruthf0546402013-07-18 07:15:00 +00002044 Offset - NewAllocaBeginOffset),
2045 PointerTy);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002046 }
2047
Chandler Carruth4b2b38d2012-10-03 08:14:02 +00002048 /// \brief Compute suitable alignment to access an offset into the new alloca.
2049 unsigned getOffsetAlign(uint64_t Offset) {
Chandler Carruth176ca712012-10-01 12:16:54 +00002050 unsigned NewAIAlign = NewAI.getAlignment();
2051 if (!NewAIAlign)
Chandler Carruth90a735d2013-07-19 07:21:28 +00002052 NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
Chandler Carruth176ca712012-10-01 12:16:54 +00002053 return MinAlign(NewAIAlign, Offset);
2054 }
Chandler Carruth4b2b38d2012-10-03 08:14:02 +00002055
Chandler Carruth4b2b38d2012-10-03 08:14:02 +00002056 /// \brief Compute suitable alignment to access a type at an offset of the
2057 /// new alloca.
2058 ///
2059 /// \returns zero if the type's ABI alignment is a suitable alignment,
2060 /// otherwise returns the maximal suitable alignment.
2061 unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
2062 unsigned Align = getOffsetAlign(Offset);
Chandler Carruth90a735d2013-07-19 07:21:28 +00002063 return Align == DL.getABITypeAlignment(Ty) ? 0 : Align;
Chandler Carruth4b2b38d2012-10-03 08:14:02 +00002064 }
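  // Worked example (illustrative only): if NewAI is 8-byte aligned, then
  // getOffsetAlign(4) == MinAlign(8, 4) == 4. For an i32 access (4-byte ABI
  // alignment) getOffsetTypeAlign returns 0, letting the caller rely on the
  // natural alignment; for an i64 access (8-byte ABI alignment) it returns
  // the maximal suitable alignment, 4.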
2065
Chandler Carruth845b73c2012-11-21 08:16:30 +00002066 unsigned getIndex(uint64_t Offset) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002067 assert(VecTy && "Can only call getIndex when rewriting a vector");
2068 uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2069 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
2070 uint32_t Index = RelOffset / ElementSize;
2071 assert(Index * ElementSize == RelOffset);
Chandler Carruth845b73c2012-11-21 08:16:30 +00002072 return Index;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002073 }
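  // Worked example (illustrative only): for a <4 x i32> alloca, ElementSize
  // is 4 bytes, so a slice covering bytes [8, 16) yields getIndex(8) == 2 and
  // getIndex(16) == 4, i.e. the half-open element range [2, 4). An offset
  // such as 6 that does not fall on an element boundary would trip the
  // Index * ElementSize == RelOffset assertion.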
2074
2075 void deleteIfTriviallyDead(Value *V) {
2076 Instruction *I = cast<Instruction>(V);
2077 if (isInstructionTriviallyDead(I))
Chandler Carruth18db7952012-11-20 01:12:50 +00002078 Pass.DeadInsts.insert(I);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002079 }
2080
Chandler Carruthf0546402013-07-18 07:15:00 +00002081 Value *rewriteVectorizedLoadInst(uint64_t NewBeginOffset,
2082 uint64_t NewEndOffset) {
2083 unsigned BeginIndex = getIndex(NewBeginOffset);
2084 unsigned EndIndex = getIndex(NewEndOffset);
Chandler Carruth769445e2012-12-17 12:50:21 +00002085 assert(EndIndex > BeginIndex && "Empty vector!");
Chandler Carruthb6bc8742012-12-17 13:07:30 +00002086
2087 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002088 "load");
2089 return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
Chandler Carruth769445e2012-12-17 12:50:21 +00002090 }
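  // For example (a sketch, assuming the <4 x i32> alloca above): a load of
  // bytes [4, 12) becomes a whole-vector load plus a lane extract,
  // conceptually:
  //
  //   %v   = load <4 x i32>* %new.alloca
  //   %sub = shufflevector <4 x i32> %v, <4 x i32> undef,
  //                        <2 x i32> <i32 1, i32 2>
  //
  // which is the shuffle extractVector emits for BeginIndex == 1 and
  // EndIndex == 3.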
2091
Chandler Carruthf0546402013-07-18 07:15:00 +00002092 Value *rewriteIntegerLoad(LoadInst &LI, uint64_t NewBeginOffset,
2093 uint64_t NewEndOffset) {
Chandler Carruth59ff93af2012-10-18 09:56:08 +00002094 assert(IntTy && "We cannot insert an integer to the alloca");
Chandler Carruth92924fd2012-09-24 00:34:20 +00002095 assert(!LI.isVolatile());
Chandler Carruth59ff93af2012-10-18 09:56:08 +00002096 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002097 "load");
Chandler Carruth90a735d2013-07-19 07:21:28 +00002098 V = convertValue(DL, IRB, V, IntTy);
Chandler Carruthf0546402013-07-18 07:15:00 +00002099 assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2100 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2101 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset)
Chandler Carruth90a735d2013-07-19 07:21:28 +00002102 V = extractInteger(DL, IRB, V, cast<IntegerType>(LI.getType()), Offset,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002103 "extract");
Chandler Carruth18db7952012-11-20 01:12:50 +00002104 return V;
Chandler Carruth92924fd2012-09-24 00:34:20 +00002105 }
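  // For example (illustrative only, little-endian layout): with an i64 wide
  // integer alloca, loading bytes [4, 8) loads the full i64 and lets
  // extractInteger shift and truncate, conceptually:
  //
  //   %wide = load i64* %new.alloca
  //   %shr  = lshr i64 %wide, 32
  //   %val  = trunc i64 %shr to i32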
2106
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002107 bool visitLoadInst(LoadInst &LI) {
2108 DEBUG(dbgs() << " original: " << LI << "\n");
2109 Value *OldOp = LI.getOperand(0);
2110 assert(OldOp == OldPtr);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002111
Chandler Carruthf0546402013-07-18 07:15:00 +00002112 // Compute the intersecting offset range.
2113 assert(BeginOffset < NewAllocaEndOffset);
2114 assert(EndOffset > NewAllocaBeginOffset);
2115 uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2116 uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2117
2118 uint64_t Size = NewEndOffset - NewBeginOffset;
Chandler Carruth3e994a22012-11-20 10:02:19 +00002119
Chandler Carrutha1c54bb2013-03-14 11:32:24 +00002120 Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), Size * 8)
2121 : LI.getType();
Chandler Carruth18db7952012-11-20 01:12:50 +00002122 bool IsPtrAdjusted = false;
2123 Value *V;
2124 if (VecTy) {
Chandler Carruthf0546402013-07-18 07:15:00 +00002125 V = rewriteVectorizedLoadInst(NewBeginOffset, NewEndOffset);
Chandler Carruth18db7952012-11-20 01:12:50 +00002126 } else if (IntTy && LI.getType()->isIntegerTy()) {
Chandler Carruthf0546402013-07-18 07:15:00 +00002127 V = rewriteIntegerLoad(LI, NewBeginOffset, NewEndOffset);
2128 } else if (NewBeginOffset == NewAllocaBeginOffset &&
Chandler Carruth90a735d2013-07-19 07:21:28 +00002129 canConvertValue(DL, NewAllocaTy, LI.getType())) {
Chandler Carruth18db7952012-11-20 01:12:50 +00002130 V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002131 LI.isVolatile(), "load");
Chandler Carruth18db7952012-11-20 01:12:50 +00002132 } else {
2133 Type *LTy = TargetTy->getPointerTo();
Chandler Carruthf0546402013-07-18 07:15:00 +00002134 V = IRB.CreateAlignedLoad(
2135 getAdjustedAllocaPtr(IRB, NewBeginOffset, LTy),
2136 getOffsetTypeAlign(TargetTy, NewBeginOffset - NewAllocaBeginOffset),
2137 LI.isVolatile(), "load");
Chandler Carruth18db7952012-11-20 01:12:50 +00002138 IsPtrAdjusted = true;
2139 }
Chandler Carruth90a735d2013-07-19 07:21:28 +00002140 V = convertValue(DL, IRB, V, TargetTy);
Chandler Carruth18db7952012-11-20 01:12:50 +00002141
Chandler Carrutha1c54bb2013-03-14 11:32:24 +00002142 if (IsSplit) {
Chandler Carruth58d05562012-10-25 04:37:07 +00002143 assert(!LI.isVolatile());
2144 assert(LI.getType()->isIntegerTy() &&
2145 "Only integer type loads and stores are split");
Chandler Carruth90a735d2013-07-19 07:21:28 +00002146 assert(Size < DL.getTypeStoreSize(LI.getType()) &&
Chandler Carrutha1c54bb2013-03-14 11:32:24 +00002147 "Split load isn't smaller than original load");
Chandler Carruth58d05562012-10-25 04:37:07 +00002148 assert(LI.getType()->getIntegerBitWidth() ==
Chandler Carruth90a735d2013-07-19 07:21:28 +00002149 DL.getTypeStoreSizeInBits(LI.getType()) &&
Chandler Carruth58d05562012-10-25 04:37:07 +00002150 "Non-byte-multiple bit width");
Chandler Carruth58d05562012-10-25 04:37:07 +00002151 // Move the insertion point just past the load so that we can refer to it.
2152 IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
Chandler Carruth58d05562012-10-25 04:37:07 +00002153 // Create a placeholder value with the same type as LI to use as the
2154 // basis for the new value. This allows us to replace the uses of LI with
2155 // the computed value, and then replace the placeholder with LI, leaving
2156 // LI only used for this computation.
2157 Value *Placeholder
Jakub Staszak4e45abf2012-11-01 01:10:43 +00002158 = new LoadInst(UndefValue::get(LI.getType()->getPointerTo()));
Chandler Carruth90a735d2013-07-19 07:21:28 +00002159 V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002160 "insert");
Chandler Carruth58d05562012-10-25 04:37:07 +00002161 LI.replaceAllUsesWith(V);
2162 Placeholder->replaceAllUsesWith(&LI);
Jakub Staszak4e45abf2012-11-01 01:10:43 +00002163 delete Placeholder;
Chandler Carruth18db7952012-11-20 01:12:50 +00002164 } else {
2165 LI.replaceAllUsesWith(V);
Chandler Carruth58d05562012-10-25 04:37:07 +00002166 }
2167
Chandler Carruth18db7952012-11-20 01:12:50 +00002168 Pass.DeadInsts.insert(&LI);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002169 deleteIfTriviallyDead(OldOp);
Chandler Carruth18db7952012-11-20 01:12:50 +00002170 DEBUG(dbgs() << " to: " << *V << "\n");
2171 return !LI.isVolatile() && !IsPtrAdjusted;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002172 }
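  // A sketch of the placeholder dance above (exposition only): the widened
  // value V is built in terms of a dummy load %ph, after which
  //
  //   LI.replaceAllUsesWith(V);             // users of %li now see V
  //   Placeholder->replaceAllUsesWith(&LI); // V is computed from %li itself
  //
  // leaves %li live only as an input to V's computation, and the now-unused
  // %ph can simply be deleted.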
2173
Chandler Carruthf0546402013-07-18 07:15:00 +00002174 bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
2175 uint64_t NewBeginOffset,
2176 uint64_t NewEndOffset) {
Bob Wilsonacfc01d2013-06-25 19:09:50 +00002177 if (V->getType() != VecTy) {
Chandler Carruthf0546402013-07-18 07:15:00 +00002178 unsigned BeginIndex = getIndex(NewBeginOffset);
2179 unsigned EndIndex = getIndex(NewEndOffset);
Bob Wilsonacfc01d2013-06-25 19:09:50 +00002180 assert(EndIndex > BeginIndex && "Empty vector!");
2181 unsigned NumElements = EndIndex - BeginIndex;
2182 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
Chandler Carruth9f21fe12013-07-19 09:13:58 +00002183 Type *SliceTy =
2184 (NumElements == 1) ? ElementTy
2185 : VectorType::get(ElementTy, NumElements);
2186 if (V->getType() != SliceTy)
2187 V = convertValue(DL, IRB, V, SliceTy);
Chandler Carruth845b73c2012-11-21 08:16:30 +00002188
Bob Wilsonacfc01d2013-06-25 19:09:50 +00002189 // Mix in the existing elements.
2190 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2191 "load");
2192 V = insertVector(IRB, Old, V, BeginIndex, "vec");
2193 }
Chandler Carruth871ba722012-09-26 10:27:46 +00002194 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Chandler Carruth18db7952012-11-20 01:12:50 +00002195 Pass.DeadInsts.insert(&SI);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002196
2197 (void)Store;
2198 DEBUG(dbgs() << " to: " << *Store << "\n");
2199 return true;
2200 }
2201
Chandler Carruthf0546402013-07-18 07:15:00 +00002202 bool rewriteIntegerStore(Value *V, StoreInst &SI,
2203 uint64_t NewBeginOffset, uint64_t NewEndOffset) {
Chandler Carruth59ff93af2012-10-18 09:56:08 +00002204 assert(IntTy && "We cannot extract an integer from the alloca");
Chandler Carruth92924fd2012-09-24 00:34:20 +00002205 assert(!SI.isVolatile());
Chandler Carruth90a735d2013-07-19 07:21:28 +00002206 if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
Chandler Carruth59ff93af2012-10-18 09:56:08 +00002207 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002208 "oldload");
Chandler Carruth90a735d2013-07-19 07:21:28 +00002209 Old = convertValue(DL, IRB, Old, IntTy);
Chandler Carruth59ff93af2012-10-18 09:56:08 +00002210 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2211 uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
Chandler Carruth90a735d2013-07-19 07:21:28 +00002212 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002213 "insert");
Chandler Carruth59ff93af2012-10-18 09:56:08 +00002214 }
Chandler Carruth90a735d2013-07-19 07:21:28 +00002215 V = convertValue(DL, IRB, V, NewAllocaTy);
Chandler Carruth59ff93af2012-10-18 09:56:08 +00002216 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Chandler Carruth18db7952012-11-20 01:12:50 +00002217 Pass.DeadInsts.insert(&SI);
Chandler Carruth92924fd2012-09-24 00:34:20 +00002218 (void)Store;
2219 DEBUG(dbgs() << " to: " << *Store << "\n");
2220 return true;
2221 }
2222
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002223 bool visitStoreInst(StoreInst &SI) {
2224 DEBUG(dbgs() << " original: " << SI << "\n");
2225 Value *OldOp = SI.getOperand(1);
2226 assert(OldOp == OldPtr);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002227
Chandler Carruth18db7952012-11-20 01:12:50 +00002228 Value *V = SI.getValueOperand();
Chandler Carruth891fec02012-10-13 02:41:05 +00002229
Chandler Carruthac8317f2012-10-04 12:33:50 +00002230 // Strip all inbounds GEPs and pointer casts to try to dig out any root
2231 // alloca that should be re-examined after promoting this alloca.
Chandler Carruth18db7952012-11-20 01:12:50 +00002232 if (V->getType()->isPointerTy())
2233 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
Chandler Carruthac8317f2012-10-04 12:33:50 +00002234 Pass.PostPromotionWorklist.insert(AI);
2235
Chandler Carruthf0546402013-07-18 07:15:00 +00002236 // Compute the intersecting offset range.
2237 assert(BeginOffset < NewAllocaEndOffset);
2238 assert(EndOffset > NewAllocaBeginOffset);
2239 uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2240 uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2241
2242 uint64_t Size = NewEndOffset - NewBeginOffset;
Chandler Carruth90a735d2013-07-19 07:21:28 +00002243 if (Size < DL.getTypeStoreSize(V->getType())) {
Chandler Carruth18db7952012-11-20 01:12:50 +00002244 assert(!SI.isVolatile());
2245 assert(V->getType()->isIntegerTy() &&
2246 "Only integer type loads and stores are split");
2247 assert(V->getType()->getIntegerBitWidth() ==
Chandler Carruth90a735d2013-07-19 07:21:28 +00002248 DL.getTypeStoreSizeInBits(V->getType()) &&
Chandler Carruth18db7952012-11-20 01:12:50 +00002249 "Non-byte-multiple bit width");
Chandler Carruth18db7952012-11-20 01:12:50 +00002250 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
Chandler Carruth90a735d2013-07-19 07:21:28 +00002251 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset,
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002252 "extract");
Chandler Carruth891fec02012-10-13 02:41:05 +00002253 }
2254
Chandler Carruth18db7952012-11-20 01:12:50 +00002255 if (VecTy)
Chandler Carruthf0546402013-07-18 07:15:00 +00002256 return rewriteVectorizedStoreInst(V, SI, OldOp, NewBeginOffset,
2257 NewEndOffset);
Chandler Carruth18db7952012-11-20 01:12:50 +00002258 if (IntTy && V->getType()->isIntegerTy())
Chandler Carruthf0546402013-07-18 07:15:00 +00002259 return rewriteIntegerStore(V, SI, NewBeginOffset, NewEndOffset);
Chandler Carruth435c4e02012-10-15 08:40:30 +00002260
Chandler Carruth18db7952012-11-20 01:12:50 +00002261 StoreInst *NewSI;
Chandler Carruthf0546402013-07-18 07:15:00 +00002262 if (NewBeginOffset == NewAllocaBeginOffset &&
2263 NewEndOffset == NewAllocaEndOffset &&
Chandler Carruth90a735d2013-07-19 07:21:28 +00002264 canConvertValue(DL, V->getType(), NewAllocaTy)) {
2265 V = convertValue(DL, IRB, V, NewAllocaTy);
Chandler Carruth18db7952012-11-20 01:12:50 +00002266 NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2267 SI.isVolatile());
2268 } else {
Chandler Carruthf0546402013-07-18 07:15:00 +00002269 Value *NewPtr = getAdjustedAllocaPtr(IRB, NewBeginOffset,
2270 V->getType()->getPointerTo());
2271 NewSI = IRB.CreateAlignedStore(
2272 V, NewPtr, getOffsetTypeAlign(
2273 V->getType(), NewBeginOffset - NewAllocaBeginOffset),
2274 SI.isVolatile());
Chandler Carruth18db7952012-11-20 01:12:50 +00002275 }
2276 (void)NewSI;
2277 Pass.DeadInsts.insert(&SI);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002278 deleteIfTriviallyDead(OldOp);
Chandler Carruth18db7952012-11-20 01:12:50 +00002279
2280 DEBUG(dbgs() << " to: " << *NewSI << "\n");
2281 return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002282 }
2283
Chandler Carruth514f34f2012-12-17 04:07:30 +00002284 /// \brief Compute an integer value from splatting an i8 across the given
2285 /// number of bytes.
2286 ///
2287 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
2288 /// call this routine.
Jakub Staszak086f6cd2013-02-19 22:02:21 +00002289 /// FIXME: Heed the advice above.
Chandler Carruth514f34f2012-12-17 04:07:30 +00002290 ///
2291 /// \param V The i8 value to splat.
2292 /// \param Size The number of bytes in the output (assuming i8 is one byte)
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002293 Value *getIntegerSplat(Value *V, unsigned Size) {
Chandler Carruth514f34f2012-12-17 04:07:30 +00002294 assert(Size > 0 && "Expected a positive number of bytes.");
2295 IntegerType *VTy = cast<IntegerType>(V->getType());
2296 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
2297 if (Size == 1)
2298 return V;
2299
2300 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8);
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002301 V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, "zext"),
Chandler Carruth514f34f2012-12-17 04:07:30 +00002302 ConstantExpr::getUDiv(
2303 Constant::getAllOnesValue(SplatIntTy),
2304 ConstantExpr::getZExt(
2305 Constant::getAllOnesValue(V->getType()),
2306 SplatIntTy)),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002307 "isplat");
Chandler Carruth514f34f2012-12-17 04:07:30 +00002308 return V;
2309 }
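  // Worked example (illustrative only): splatting the byte 0xAB across
  // Size == 4 computes
  //
  //   zext(0xAB) * (0xFFFFFFFF udiv 0xFF) == 0xAB * 0x01010101 == 0xABABABAB
  //
  // Because the multiplier has exactly one set byte per output byte, the
  // input byte is replicated into each of them.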
2310
Chandler Carruthccca5042012-12-17 04:07:37 +00002311 /// \brief Compute a vector splat for a given element value.
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002312 Value *getVectorSplat(Value *V, unsigned NumElements) {
2313 V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
Chandler Carruthccca5042012-12-17 04:07:37 +00002314 DEBUG(dbgs() << " splat: " << *V << "\n");
2315 return V;
2316 }
2317
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002318 bool visitMemSetInst(MemSetInst &II) {
2319 DEBUG(dbgs() << " original: " << II << "\n");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002320 assert(II.getRawDest() == OldPtr);
2321
2322 // If the memset has a variable size, it cannot be split, just adjust the
2323 // pointer to the new alloca.
2324 if (!isa<Constant>(II.getLength())) {
Chandler Carruthf0546402013-07-18 07:15:00 +00002325 assert(!IsSplit);
2326 assert(BeginOffset >= NewAllocaBeginOffset);
2327 II.setDest(
2328 getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
Chandler Carruth208124f2012-09-26 10:59:22 +00002329 Type *CstTy = II.getAlignmentCst()->getType();
Chandler Carruthf0546402013-07-18 07:15:00 +00002330 II.setAlignment(ConstantInt::get(CstTy, getOffsetAlign(BeginOffset)));
Chandler Carruth208124f2012-09-26 10:59:22 +00002331
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002332 deleteIfTriviallyDead(OldPtr);
2333 return false;
2334 }
2335
2336 // Record this instruction for deletion.
Chandler Carruth18db7952012-11-20 01:12:50 +00002337 Pass.DeadInsts.insert(&II);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002338
2339 Type *AllocaTy = NewAI.getAllocatedType();
2340 Type *ScalarTy = AllocaTy->getScalarType();
2341
Chandler Carruthf0546402013-07-18 07:15:00 +00002342 // Compute the intersecting offset range.
2343 assert(BeginOffset < NewAllocaEndOffset);
2344 assert(EndOffset > NewAllocaBeginOffset);
2345 uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2346 uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
Chandler Carruth9f21fe12013-07-19 09:13:58 +00002347 uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset;
Chandler Carruthf0546402013-07-18 07:15:00 +00002348
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002349 // If this doesn't map cleanly onto the alloca type, and that type isn't
2350 // a single value type, just emit a memset.
Chandler Carruth9d966a22012-10-15 10:24:40 +00002351 if (!VecTy && !IntTy &&
Chandler Carruthf0546402013-07-18 07:15:00 +00002352 (BeginOffset > NewAllocaBeginOffset ||
2353 EndOffset < NewAllocaEndOffset ||
Chandler Carruth9d966a22012-10-15 10:24:40 +00002354 !AllocaTy->isSingleValueType() ||
Chandler Carruth90a735d2013-07-19 07:21:28 +00002355 !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) ||
2356 DL.getTypeSizeInBits(ScalarTy)%8 != 0)) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002357 Type *SizeTy = II.getLength()->getType();
Chandler Carruthf0546402013-07-18 07:15:00 +00002358 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2359 CallInst *New = IRB.CreateMemSet(
2360 getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getRawDest()->getType()),
Chandler Carruth9f21fe12013-07-19 09:13:58 +00002361 II.getValue(), Size, getOffsetAlign(SliceOffset), II.isVolatile());
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002362 (void)New;
2363 DEBUG(dbgs() << " to: " << *New << "\n");
2364 return false;
2365 }
2366
2367 // If we can represent this as a simple value, we have to build the actual
2368 // value to store, which requires expanding the byte present in memset to
2369 // a sensible representation for the alloca type. This is essentially
Chandler Carruthccca5042012-12-17 04:07:37 +00002370 // splatting the byte to a sufficiently wide integer, splatting it across
2371 // any desired vector width, and bitcasting to the final type.
Benjamin Kramerc003a452013-01-01 16:13:35 +00002372 Value *V;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002373
Chandler Carruthccca5042012-12-17 04:07:37 +00002374 if (VecTy) {
2375 // If this is a memset of a vectorized alloca, insert it.
2376 assert(ElementTy == ScalarTy);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002377
Chandler Carruthf0546402013-07-18 07:15:00 +00002378 unsigned BeginIndex = getIndex(NewBeginOffset);
2379 unsigned EndIndex = getIndex(NewEndOffset);
Chandler Carruthccca5042012-12-17 04:07:37 +00002380 assert(EndIndex > BeginIndex && "Empty vector!");
2381 unsigned NumElements = EndIndex - BeginIndex;
2382 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2383
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002384 Value *Splat =
Chandler Carruth90a735d2013-07-19 07:21:28 +00002385 getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
2386 Splat = convertValue(DL, IRB, Splat, ElementTy);
Chandler Carruthcacda252012-12-17 14:03:01 +00002387 if (NumElements > 1)
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002388 Splat = getVectorSplat(Splat, NumElements);
Chandler Carruthccca5042012-12-17 04:07:37 +00002389
Chandler Carruthce4562b2012-12-17 13:41:21 +00002390 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002391 "oldload");
2392 V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
Chandler Carruthccca5042012-12-17 04:07:37 +00002393 } else if (IntTy) {
2394 // If this is a memset on an alloca where we can widen stores, insert the
2395 // set integer.
Chandler Carruth9d966a22012-10-15 10:24:40 +00002396 assert(!II.isVolatile());
Chandler Carruthccca5042012-12-17 04:07:37 +00002397
Chandler Carruthf0546402013-07-18 07:15:00 +00002398 uint64_t Size = NewEndOffset - NewBeginOffset;
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002399 V = getIntegerSplat(II.getValue(), Size);
Chandler Carruthccca5042012-12-17 04:07:37 +00002400
2401 if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2402 EndOffset != NewAllocaEndOffset)) {
2403 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002404 "oldload");
Chandler Carruth90a735d2013-07-19 07:21:28 +00002405 Old = convertValue(DL, IRB, Old, IntTy);
Chandler Carruthf0546402013-07-18 07:15:00 +00002406 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Chandler Carruth90a735d2013-07-19 07:21:28 +00002407 V = insertInteger(DL, IRB, Old, V, Offset, "insert");
Chandler Carruthccca5042012-12-17 04:07:37 +00002408 } else {
2409 assert(V->getType() == IntTy &&
2410 "Wrong type for an alloca wide integer!");
2411 }
Chandler Carruth90a735d2013-07-19 07:21:28 +00002412 V = convertValue(DL, IRB, V, AllocaTy);
Chandler Carruthccca5042012-12-17 04:07:37 +00002413 } else {
2414 // Established these invariants above.
Chandler Carruthf0546402013-07-18 07:15:00 +00002415 assert(NewBeginOffset == NewAllocaBeginOffset);
2416 assert(NewEndOffset == NewAllocaEndOffset);
Chandler Carruthccca5042012-12-17 04:07:37 +00002417
Chandler Carruth90a735d2013-07-19 07:21:28 +00002418 V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
Chandler Carruthccca5042012-12-17 04:07:37 +00002419 if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002420 V = getVectorSplat(V, AllocaVecTy->getNumElements());
Chandler Carruth95e1fb82012-12-17 13:51:03 +00002421
Chandler Carruth90a735d2013-07-19 07:21:28 +00002422 V = convertValue(DL, IRB, V, AllocaTy);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002423 }
2424
Chandler Carruth95e1fb82012-12-17 13:51:03 +00002425 Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
Chandler Carruth871ba722012-09-26 10:27:46 +00002426 II.isVolatile());
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002427 (void)New;
2428 DEBUG(dbgs() << " to: " << *New << "\n");
2429 return !II.isVolatile();
2430 }
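  // For example (a sketch of the vector path above): a memset of the byte 1
  // over bytes [0, 8) of a <4 x i32> alloca splats 0x01 to the i32
  // 0x01010101, widens that to a <2 x i32> splat, and uses insertVector to
  // merge it over lanes 0 and 1 of the reloaded old value, leaving lanes 2
  // and 3 of the alloca untouched.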
2431
2432 bool visitMemTransferInst(MemTransferInst &II) {
2433 // Rewriting of memory transfer instructions can be a bit tricky. We break
2434 // them into two categories: split intrinsics and unsplit intrinsics.
2435
2436 DEBUG(dbgs() << " original: " << II << "\n");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002437
Chandler Carruthf0546402013-07-18 07:15:00 +00002438 // Compute the intersecting offset range.
2439 assert(BeginOffset < NewAllocaEndOffset);
2440 assert(EndOffset > NewAllocaBeginOffset);
2441 uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2442 uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2443
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002444 assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr);
2445 bool IsDest = II.getRawDest() == OldPtr;
2446
Chandler Carruth176ca712012-10-01 12:16:54 +00002447 // Compute the relative offset within the transfer.
Chandler Carruth90a735d2013-07-19 07:21:28 +00002448 unsigned IntPtrWidth = DL.getPointerSizeInBits();
Chandler Carruthf0546402013-07-18 07:15:00 +00002449 APInt RelOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
Chandler Carruth176ca712012-10-01 12:16:54 +00002450
2451 unsigned Align = II.getAlignment();
Chandler Carruth9f21fe12013-07-19 09:13:58 +00002452 uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset;
Chandler Carruth176ca712012-10-01 12:16:54 +00002453 if (Align > 1)
Chandler Carruth9f21fe12013-07-19 09:13:58 +00002454 Align =
2455 MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
2456 MinAlign(II.getAlignment(), getOffsetAlign(SliceOffset)));
Chandler Carruth176ca712012-10-01 12:16:54 +00002457
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002458 // For unsplit intrinsics, we simply modify the source and destination
2459 // pointers in place. This isn't just an optimization, it is a matter of
2460 // correctness. With unsplit intrinsics we may be dealing with transfers
2461 // within a single alloca before SROA ran, or with transfers that have
2462 // a variable length. We may also be dealing with memmove instead of
2463 // memcpy, and so simply updating the pointers is the necessary for us to
2464 // update both source and dest of a single call.
Chandler Carruthf0546402013-07-18 07:15:00 +00002465 if (!IsSplittable) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002466 Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
2467 if (IsDest)
Chandler Carruthf0546402013-07-18 07:15:00 +00002468 II.setDest(
2469 getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002470 else
Chandler Carruthf0546402013-07-18 07:15:00 +00002471 II.setSource(getAdjustedAllocaPtr(IRB, BeginOffset,
2472 II.getRawSource()->getType()));
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002473
Chandler Carruth208124f2012-09-26 10:59:22 +00002474 Type *CstTy = II.getAlignmentCst()->getType();
Chandler Carruth176ca712012-10-01 12:16:54 +00002475 II.setAlignment(ConstantInt::get(CstTy, Align));
Chandler Carruth208124f2012-09-26 10:59:22 +00002476
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002477 DEBUG(dbgs() << " to: " << II << "\n");
2478 deleteIfTriviallyDead(OldOp);
2479 return false;
2480 }
2481 // For split transfer intrinsics we have an incredibly useful assurance:
2482 // the source and destination do not reside within the same alloca, and at
2483 // least one of them does not escape. This means that we can replace
2484 // memmove with memcpy, and we don't need to worry about all manner of
2485 // downsides to splitting and transforming the operations.
2486
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002487 // If this doesn't map cleanly onto the alloca type, and that type isn't
2488 // a single value type, just emit a memcpy.
2489 bool EmitMemCpy
Chandler Carruthf0546402013-07-18 07:15:00 +00002490 = !VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset ||
2491 EndOffset < NewAllocaEndOffset ||
Chandler Carruth49c8eea2012-10-15 10:24:43 +00002492 !NewAI.getAllocatedType()->isSingleValueType());
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002493
2494 // If we're just going to emit a memcpy, the alloca hasn't changed, and the
2495 // size hasn't been shrunk based on analysis of the viable range, this is
2496 // a no-op.
2497 if (EmitMemCpy && &OldAI == &NewAI) {
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002498 // Ensure the start lines up.
Chandler Carruthf0546402013-07-18 07:15:00 +00002499 assert(NewBeginOffset == BeginOffset);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002500
2501 // Rewrite the size as needed.
Chandler Carruthf0546402013-07-18 07:15:00 +00002502 if (NewEndOffset != EndOffset)
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002503 II.setLength(ConstantInt::get(II.getLength()->getType(),
Chandler Carruthf0546402013-07-18 07:15:00 +00002504 NewEndOffset - NewBeginOffset));
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002505 return false;
2506 }
2507 // Record this instruction for deletion.
Chandler Carruth18db7952012-11-20 01:12:50 +00002508 Pass.DeadInsts.insert(&II);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002509
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002510 // Strip all inbounds GEPs and pointer casts to try to dig out any root
2511 // alloca that should be re-examined after rewriting this instruction.
Chandler Carruth21eb4e92012-12-17 14:51:24 +00002512 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002513 if (AllocaInst *AI
Chandler Carruth1bf38c62014-01-19 12:16:54 +00002514 = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
2515 assert(AI != &OldAI && AI != &NewAI &&
2516 "Splittable transfers cannot reach the same alloca on both ends.");
Chandler Carruth4bd8f662012-09-26 07:41:40 +00002517 Pass.Worklist.insert(AI);
Chandler Carruth1bf38c62014-01-19 12:16:54 +00002518 }
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002519
2520 if (EmitMemCpy) {
Rafael Espindola8eee97d2014-02-14 19:02:01 +00002521 Type *OtherPtrTy = OtherPtr->getType();
Chandler Carruth21eb4e92012-12-17 14:51:24 +00002522
2523 // Compute the other pointer, folding as much as possible to produce
2524 // a single, simple GEP in most cases.
Chandler Carruth90a735d2013-07-19 07:21:28 +00002525 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy);
Chandler Carruth21eb4e92012-12-17 14:51:24 +00002526
Chandler Carruthf0546402013-07-18 07:15:00 +00002527 Value *OurPtr = getAdjustedAllocaPtr(
2528 IRB, NewBeginOffset,
2529 IsDest ? II.getRawDest()->getType() : II.getRawSource()->getType());
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002530 Type *SizeTy = II.getLength()->getType();
Chandler Carruthf0546402013-07-18 07:15:00 +00002531 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002532
2533 CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
2534 IsDest ? OtherPtr : OurPtr,
Chandler Carruth871ba722012-09-26 10:27:46 +00002535 Size, Align, II.isVolatile());
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002536 (void)New;
2537 DEBUG(dbgs() << " to: " << *New << "\n");
2538 return false;
2539 }
2540
Chandler Carruth08e5f492012-10-03 08:26:28 +00002541 // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
2542 // is equivalent to 1, but that isn't true if we end up rewriting this as
2543 // a load or store.
2544 if (!Align)
2545 Align = 1;
2546
Chandler Carruthf0546402013-07-18 07:15:00 +00002547 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
2548 NewEndOffset == NewAllocaEndOffset;
2549 uint64_t Size = NewEndOffset - NewBeginOffset;
2550 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
2551 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
Chandler Carruth21eb4e92012-12-17 14:51:24 +00002552 unsigned NumElements = EndIndex - BeginIndex;
2553 IntegerType *SubIntTy
2554 = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0;
2555
2556 Type *OtherPtrTy = NewAI.getType();
2557 if (VecTy && !IsWholeAlloca) {
2558 if (NumElements == 1)
2559 OtherPtrTy = VecTy->getElementType();
2560 else
2561 OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
2562
2563 OtherPtrTy = OtherPtrTy->getPointerTo();
2564 } else if (IntTy && !IsWholeAlloca) {
2565 OtherPtrTy = SubIntTy->getPointerTo();
2566 }
2567
Chandler Carruth90a735d2013-07-19 07:21:28 +00002568 Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002569 Value *DstPtr = &NewAI;
2570 if (!IsDest)
2571 std::swap(SrcPtr, DstPtr);
2572
2573 Value *Src;
Chandler Carruth21eb4e92012-12-17 14:51:24 +00002574 if (VecTy && !IsWholeAlloca && !IsDest) {
2575 Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002576 "load");
2577 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
Chandler Carruth49c8eea2012-10-15 10:24:43 +00002578 } else if (IntTy && !IsWholeAlloca && !IsDest) {
Chandler Carruth59ff93af2012-10-18 09:56:08 +00002579 Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002580 "load");
Chandler Carruth90a735d2013-07-19 07:21:28 +00002581 Src = convertValue(DL, IRB, Src, IntTy);
Chandler Carruthf0546402013-07-18 07:15:00 +00002582 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Chandler Carruth90a735d2013-07-19 07:21:28 +00002583 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002584 } else {
Chandler Carruth871ba722012-09-26 10:27:46 +00002585 Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002586 "copyload");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002587 }
2588
Chandler Carruth21eb4e92012-12-17 14:51:24 +00002589 if (VecTy && !IsWholeAlloca && IsDest) {
2590 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002591 "oldload");
2592 Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
Chandler Carruth21eb4e92012-12-17 14:51:24 +00002593 } else if (IntTy && !IsWholeAlloca && IsDest) {
Chandler Carruth59ff93af2012-10-18 09:56:08 +00002594 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002595 "oldload");
Chandler Carruth90a735d2013-07-19 07:21:28 +00002596 Old = convertValue(DL, IRB, Old, IntTy);
Chandler Carruthf0546402013-07-18 07:15:00 +00002597 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Chandler Carruth90a735d2013-07-19 07:21:28 +00002598 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
2599 Src = convertValue(DL, IRB, Src, NewAllocaTy);
Chandler Carruth49c8eea2012-10-15 10:24:43 +00002600 }
2601
Chandler Carruth871ba722012-09-26 10:27:46 +00002602 StoreInst *Store = cast<StoreInst>(
2603 IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
2604 (void)Store;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002605 DEBUG(dbgs() << " to: " << *Store << "\n");
2606 return !II.isVolatile();
2607 }
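  // For example (a sketch relying on the split-intrinsic assurance above): a
  // memcpy of 8 bytes out of a <4 x i32> alloca whose slice covers lanes 1
  // and 2 becomes an extract plus an ordinary store, conceptually:
  //
  //   %v   = load <4 x i32>* %new.alloca
  //   %sub = shufflevector <4 x i32> %v, <4 x i32> undef,
  //                        <2 x i32> <i32 1, i32 2>
  //   store <2 x i32> %sub, <2 x i32>* %other.ptr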
2608
2609 bool visitIntrinsicInst(IntrinsicInst &II) {
2610 assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
2611 II.getIntrinsicID() == Intrinsic::lifetime_end);
2612 DEBUG(dbgs() << " original: " << II << "\n");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002613 assert(II.getArgOperand(1) == OldPtr);
2614
Chandler Carruthf0546402013-07-18 07:15:00 +00002615 // Compute the intersecting offset range.
2616 assert(BeginOffset < NewAllocaEndOffset);
2617 assert(EndOffset > NewAllocaBeginOffset);
2618 uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2619 uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2620
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002621 // Record this instruction for deletion.
Chandler Carruth18db7952012-11-20 01:12:50 +00002622 Pass.DeadInsts.insert(&II);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002623
2624 ConstantInt *Size
2625 = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
Chandler Carruthf0546402013-07-18 07:15:00 +00002626 NewEndOffset - NewBeginOffset);
2627 Value *Ptr =
2628 getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getArgOperand(1)->getType());
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002629 Value *New;
2630 if (II.getIntrinsicID() == Intrinsic::lifetime_start)
2631 New = IRB.CreateLifetimeStart(Ptr, Size);
2632 else
2633 New = IRB.CreateLifetimeEnd(Ptr, Size);
2634
Edwin Vane82f80d42013-01-29 17:42:24 +00002635 (void)New;
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002636 DEBUG(dbgs() << " to: " << *New << "\n");
2637 return true;
2638 }
2639
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002640 bool visitPHINode(PHINode &PN) {
2641 DEBUG(dbgs() << " original: " << PN << "\n");
Chandler Carruthf0546402013-07-18 07:15:00 +00002642 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
2643 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
Chandler Carruth82a57542012-10-01 10:54:05 +00002644
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002645 // We would like to compute a new pointer in only one place, but have it be
2646 // as local as possible to the PHI. To do that, we re-use the location of
2647 // the old pointer, which necessarily must be in the right position to
2648 // dominate the PHI.
Jakub Staszakcb132fa2013-07-22 22:10:43 +00002649 IRBuilderTy PtrBuilder(OldPtr);
Chandler Carruth34f0c7f2013-03-21 09:52:18 +00002650 PtrBuilder.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) +
2651 ".");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002652
Chandler Carruthf0546402013-07-18 07:15:00 +00002653 Value *NewPtr =
2654 getAdjustedAllocaPtr(PtrBuilder, BeginOffset, OldPtr->getType());
Chandler Carruth82a57542012-10-01 10:54:05 +00002655 // Replace the operands which were using the old pointer.
Benjamin Kramer7ddd7052012-10-20 12:04:57 +00002656 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002657
Chandler Carruth82a57542012-10-01 10:54:05 +00002658 DEBUG(dbgs() << " to: " << PN << "\n");
2659 deleteIfTriviallyDead(OldPtr);
Chandler Carruthf0546402013-07-18 07:15:00 +00002660
2661 // Check whether we can speculate this PHI node, and if so remember that
Chandler Carruth83ea1952013-07-24 09:47:28 +00002662 // fact and queue it up for another iteration after the speculation
2663 // occurs.
Chandler Carruth90a735d2013-07-19 07:21:28 +00002664 if (isSafePHIToSpeculate(PN, &DL)) {
Chandler Carruthf0546402013-07-18 07:15:00 +00002665 Pass.SpeculatablePHIs.insert(&PN);
Chandler Carruth83ea1952013-07-24 09:47:28 +00002666 IsUsedByRewrittenSpeculatableInstructions = true;
Chandler Carruthf0546402013-07-18 07:15:00 +00002667 return true;
2668 }
2669
2670 return false; // PHIs can't be promoted on their own.
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002671 }
2672
2673 bool visitSelectInst(SelectInst &SI) {
2674 DEBUG(dbgs() << " original: " << SI << "\n");
Benjamin Kramer0212dc22013-04-21 17:48:39 +00002675 assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
2676 "Pointer isn't an operand!");
Chandler Carruthf0546402013-07-18 07:15:00 +00002677 assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
2678 assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
Chandler Carruth82a57542012-10-01 10:54:05 +00002679
Chandler Carruthf0546402013-07-18 07:15:00 +00002680 Value *NewPtr = getAdjustedAllocaPtr(IRB, BeginOffset, OldPtr->getType());
Benjamin Kramer0212dc22013-04-21 17:48:39 +00002681 // Replace the operands which were using the old pointer.
2682 if (SI.getOperand(1) == OldPtr)
2683 SI.setOperand(1, NewPtr);
2684 if (SI.getOperand(2) == OldPtr)
2685 SI.setOperand(2, NewPtr);
2686
Chandler Carruth82a57542012-10-01 10:54:05 +00002687 DEBUG(dbgs() << " to: " << SI << "\n");
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002688 deleteIfTriviallyDead(OldPtr);
Chandler Carruthf0546402013-07-18 07:15:00 +00002689
2690 // Check whether we can speculate this select instruction, and if so
Chandler Carruth83ea1952013-07-24 09:47:28 +00002691 // remember that fact and queue it up for another iteration after the
2692 // speculation occurs.
Chandler Carruth90a735d2013-07-19 07:21:28 +00002693 if (isSafeSelectToSpeculate(SI, &DL)) {
Chandler Carruthf0546402013-07-18 07:15:00 +00002694 Pass.SpeculatableSelects.insert(&SI);
Chandler Carruth83ea1952013-07-24 09:47:28 +00002695 IsUsedByRewrittenSpeculatableInstructions = true;
Chandler Carruthf0546402013-07-18 07:15:00 +00002696 return true;
2697 }
2698
2699 return false; // Selects can't be promoted on their own.
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002700 }
2701
2702};
2703}
2704
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002705namespace {
2706/// \brief Visitor to rewrite aggregate loads and stores as scalar.
2707///
2708/// This pass aggressively rewrites all aggregate loads and stores on
2709/// a particular pointer (or any pointer derived from it which we can identify)
2710/// with scalar loads and stores.
2711class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
2712 // Befriend the base class so it can delegate to private visit methods.
2713 friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
2714
Chandler Carruth90a735d2013-07-19 07:21:28 +00002715 const DataLayout &DL;
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002716
2717 /// Queue of pointer uses to analyze and potentially rewrite.
2718 SmallVector<Use *, 8> Queue;
2719
2720 /// Set to prevent us from cycling with phi nodes and loops.
2721 SmallPtrSet<User *, 8> Visited;
2722
2723 /// The current pointer use being rewritten. This is used to dig up the used
2724 /// value (as opposed to the user).
2725 Use *U;
2726
2727public:
Chandler Carruth90a735d2013-07-19 07:21:28 +00002728 AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002729
2730 /// Rewrite loads and stores through a pointer and all pointers derived from
2731 /// it.
2732 bool rewrite(Instruction &I) {
2733 DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
2734 enqueueUsers(I);
2735 bool Changed = false;
2736 while (!Queue.empty()) {
2737 U = Queue.pop_back_val();
2738 Changed |= visit(cast<Instruction>(U->getUser()));
2739 }
2740 return Changed;
2741 }
2742
2743private:
2744 /// Enqueue all the users of the given instruction for further processing.
2745 /// This uses a set to de-duplicate users.
2746 void enqueueUsers(Instruction &I) {
2747 for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
2748 ++UI)
2749 if (Visited.insert(*UI))
2750 Queue.push_back(&UI.getUse());
2751 }
2752
2753 // Conservative default is to not rewrite anything.
2754 bool visitInstruction(Instruction &I) { return false; }
2755
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002756 /// \brief Generic recursive split emission class.
Benjamin Kramer73a9e4a2012-09-18 17:06:32 +00002757 template <typename Derived>
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002758 class OpSplitter {
2759 protected:
2760 /// The builder used to form new instructions.
Chandler Carruthd177f862013-03-20 07:30:36 +00002761 IRBuilderTy IRB;
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002762 /// The indices which to be used with insert- or extractvalue to select the
2763 /// appropriate value within the aggregate.
2764 SmallVector<unsigned, 4> Indices;
2765 /// The indices to a GEP instruction which will move Ptr to the correct slot
2766 /// within the aggregate.
2767 SmallVector<Value *, 4> GEPIndices;
2768 /// The base pointer of the original op, used as a base for GEPing the
2769 /// split operations.
2770 Value *Ptr;
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002771
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002772 /// Initialize the splitter with an insertion point, Ptr and start with a
2773 /// single zero GEP index.
2774 OpSplitter(Instruction *InsertionPoint, Value *Ptr)
Benjamin Kramer73a9e4a2012-09-18 17:06:32 +00002775 : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002776
2777 public:
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002778 /// \brief Generic recursive split emission routine.
2779 ///
2780 /// This method recursively splits an aggregate op (load or store) into
2781 /// scalar or vector ops. It splits recursively until it hits a single value
2782 /// and emits that single value operation via the template argument.
2783 ///
2784 /// The logic of this routine relies on GEPs and insertvalue and
2785 /// extractvalue all operating with the same fundamental index list, merely
2786 /// formatted differently (GEPs need actual values).
2787 ///
2788 /// \param Ty The type being split recursively into smaller ops.
2789 /// \param Agg The aggregate value being built up or stored, depending on
2790 /// whether this is splitting a load or a store respectively.
2791 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
2792 if (Ty->isSingleValueType())
Benjamin Kramer73a9e4a2012-09-18 17:06:32 +00002793 return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002794
2795 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2796 unsigned OldSize = Indices.size();
2797 (void)OldSize;
2798 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
2799 ++Idx) {
2800 assert(Indices.size() == OldSize && "Did not return to the old size");
2801 Indices.push_back(Idx);
2802 GEPIndices.push_back(IRB.getInt32(Idx));
2803 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
2804 GEPIndices.pop_back();
2805 Indices.pop_back();
2806 }
2807 return;
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002808 }
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002809
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002810 if (StructType *STy = dyn_cast<StructType>(Ty)) {
2811 unsigned OldSize = Indices.size();
2812 (void)OldSize;
2813 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
2814 ++Idx) {
2815 assert(Indices.size() == OldSize && "Did not return to the old size");
2816 Indices.push_back(Idx);
2817 GEPIndices.push_back(IRB.getInt32(Idx));
2818 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
2819 GEPIndices.pop_back();
2820 Indices.pop_back();
2821 }
2822 return;
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002823 }
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002824
2825 llvm_unreachable("Only arrays and structs are aggregate loadable types");
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002826 }
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002827 };
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002828
Benjamin Kramer73a9e4a2012-09-18 17:06:32 +00002829 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002830 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr)
Benjamin Kramera59ef572012-09-18 17:11:47 +00002831 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {}
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002832
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002833 /// Emit a leaf load of a single value. This is called at the leaves of the
2834 /// recursive emission to actually load values.
Benjamin Kramer73a9e4a2012-09-18 17:06:32 +00002835 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002836 assert(Ty->isSingleValueType());
2837 // Load the single value and insert it using the indices.
Jakub Staszak3c6583a2013-02-19 22:14:45 +00002838 Value *GEP = IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep");
2839 Value *Load = IRB.CreateLoad(GEP, Name + ".load");
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002840 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
2841 DEBUG(dbgs() << " to: " << *Load << "\n");
2842 }
2843 };
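  // For example (illustrative only): a load of the FCA type {i32, float} is
  // split by LoadOpSplitter into per-field loads reassembled with
  // insertvalue, conceptually:
  //
  //   %gep0 = getelementptr inbounds {i32, float}* %p, i32 0, i32 0
  //   %f0   = load i32* %gep0
  //   %agg0 = insertvalue {i32, float} undef, i32 %f0, 0
  //   %gep1 = getelementptr inbounds {i32, float}* %p, i32 0, i32 1
  //   %f1   = load float* %gep1
  //   %agg1 = insertvalue {i32, float} %agg0, float %f1, 1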
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002844
2845 bool visitLoadInst(LoadInst &LI) {
2846 assert(LI.getPointerOperand() == *U);
2847 if (!LI.isSimple() || LI.getType()->isSingleValueType())
2848 return false;
2849
2850 // We have an aggregate being loaded, split it apart.
2851 DEBUG(dbgs() << " original: " << LI << "\n");
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002852 LoadOpSplitter Splitter(&LI, *U);
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002853 Value *V = UndefValue::get(LI.getType());
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002854 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002855 LI.replaceAllUsesWith(V);
2856 LI.eraseFromParent();
2857 return true;
2858 }
2859
Benjamin Kramer73a9e4a2012-09-18 17:06:32 +00002860 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002861 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr)
Benjamin Kramera59ef572012-09-18 17:11:47 +00002862 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {}
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002863
2864 /// Emit a leaf store of a single value. This is called at the leaves of the
2865 /// recursive emission to actually produce stores.
Benjamin Kramer73a9e4a2012-09-18 17:06:32 +00002866 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002867 assert(Ty->isSingleValueType());
2868 // Extract the single value and store it using the indices.
2869 Value *Store = IRB.CreateStore(
2870 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"),
2871 IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"));
2872 (void)Store;
2873 DEBUG(dbgs() << " to: " << *Store << "\n");
2874 }
2875 };
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002876
2877 bool visitStoreInst(StoreInst &SI) {
2878 if (!SI.isSimple() || SI.getPointerOperand() != *U)
2879 return false;
2880 Value *V = SI.getValueOperand();
2881 if (V->getType()->isSingleValueType())
2882 return false;
2883
2884 // We have an aggregate being stored, split it apart.
2885 DEBUG(dbgs() << " original: " << SI << "\n");
Benjamin Kramer65f8c882012-09-18 16:20:46 +00002886 StoreOpSplitter Splitter(&SI, *U);
2887 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
Chandler Carruth42cb9cb2012-09-18 12:57:43 +00002888 SI.eraseFromParent();
2889 return true;
2890 }
2891
2892 bool visitBitCastInst(BitCastInst &BC) {
2893 enqueueUsers(BC);
2894 return false;
2895 }
2896
2897 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
2898 enqueueUsers(GEPI);
2899 return false;
2900 }
2901
2902 bool visitPHINode(PHINode &PN) {
2903 enqueueUsers(PN);
2904 return false;
2905 }
2906
2907 bool visitSelectInst(SelectInst &SI) {
2908 enqueueUsers(SI);
2909 return false;
2910 }
2911};
2912}
2913
Chandler Carruthba931992012-10-13 10:49:33 +00002914/// \brief Strip aggregate type wrapping.
2915///
2916/// This removes no-op aggregate types wrapping an underlying type. It will
2917/// strip as many layers of types as it can without changing either the type
2918/// size or the allocated size.
2919static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
2920 if (Ty->isSingleValueType())
2921 return Ty;
2922
2923 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
2924 uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
2925
2926 Type *InnerTy;
2927 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
2928 InnerTy = ArrTy->getElementType();
2929 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2930 const StructLayout *SL = DL.getStructLayout(STy);
2931 unsigned Index = SL->getElementContainingOffset(0);
2932 InnerTy = STy->getElementType(Index);
2933 } else {
2934 return Ty;
2935 }
2936
2937 if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
2938 TypeSize > DL.getTypeSizeInBits(InnerTy))
2939 return Ty;
2940
2941 return stripAggregateTypeWrapping(DL, InnerTy);
2942}
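// For example (illustrative only): stripAggregateTypeWrapping reduces
// { { i32 } } to i32 and [1 x i64] to i64, since peeling the wrapper changes
// neither the type size nor the allocated size, but it leaves { i32, i32 }
// and [2 x i8] alone because their first element is strictly smaller than
// the aggregate.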
2943
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002944/// \brief Try to find a partition of the aggregate type passed in for a given
2945/// offset and size.
2946///
2947/// This recurses through the aggregate type and tries to compute a subtype
2948/// based on the offset and size. When the offset and size span a sub-section
Chandler Carruth054a40a2012-09-14 11:08:31 +00002949/// of an array, it will even compute a new array type for that sub-section,
2950/// and the same for structs.
2951///
2952/// Note that this routine is very strict and tries to find a partition of the
2953/// type which produces the *exact* right offset and size. It is not forgiving
2954/// when the size or offset causes either end of the type-based partition to be off.
2955/// Also, this is a best-effort routine. It is reasonable to give up and not
2956/// return a type if necessary.
Chandler Carruth90a735d2013-07-19 07:21:28 +00002957static Type *getTypePartition(const DataLayout &DL, Type *Ty,
Chandler Carruth1b398ae2012-09-14 09:22:59 +00002958 uint64_t Offset, uint64_t Size) {
Chandler Carruth90a735d2013-07-19 07:21:28 +00002959 if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
2960 return stripAggregateTypeWrapping(DL, Ty);
2961 if (Offset > DL.getTypeAllocSize(Ty) ||
      (DL.getTypeAllocSize(Ty) - Offset) < Size)
    return 0;

  if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
    // We can't partition pointers...
    if (SeqTy->isPointerTy())
      return 0;

    Type *ElementTy = SeqTy->getElementType();
    uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
    uint64_t NumSkippedElements = Offset / ElementSize;
    if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) {
      if (NumSkippedElements >= ArrTy->getNumElements())
        return 0;
    } else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) {
      if (NumSkippedElements >= VecTy->getNumElements())
        return 0;
    }
    Offset -= NumSkippedElements * ElementSize;

    // First check if we need to recurse.
    if (Offset > 0 || Size < ElementSize) {
      // Bail if the partition ends in a different array element.
      if ((Offset + Size) > ElementSize)
        return 0;
      // Recurse through the element type trying to peel off offset bytes.
      return getTypePartition(DL, ElementTy, Offset, Size);
    }
    assert(Offset == 0);

    if (Size == ElementSize)
      return stripAggregateTypeWrapping(DL, ElementTy);
    assert(Size > ElementSize);
    uint64_t NumElements = Size / ElementSize;
    if (NumElements * ElementSize != Size)
      return 0;
    return ArrayType::get(ElementTy, NumElements);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return 0;

  const StructLayout *SL = DL.getStructLayout(STy);
  if (Offset >= SL->getSizeInBytes())
    return 0;
  uint64_t EndOffset = Offset + Size;
  if (EndOffset > SL->getSizeInBytes())
    return 0;

  unsigned Index = SL->getElementContainingOffset(Offset);
  Offset -= SL->getElementOffset(Index);

  Type *ElementTy = STy->getElementType(Index);
  uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
  if (Offset >= ElementSize)
    return 0; // The offset points into alignment padding.

  // See if any partition must be contained by the element.
  if (Offset > 0 || Size < ElementSize) {
    if ((Offset + Size) > ElementSize)
      return 0;
    return getTypePartition(DL, ElementTy, Offset, Size);
  }
  assert(Offset == 0);

  if (Size == ElementSize)
    return stripAggregateTypeWrapping(DL, ElementTy);

  StructType::element_iterator EI = STy->element_begin() + Index,
                               EE = STy->element_end();
  if (EndOffset < SL->getSizeInBytes()) {
    unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
    if (Index == EndIndex)
      return 0; // Within a single element and its padding.

    // Don't try to form "natural" types if the elements don't line up with the
    // expected size.
    // FIXME: We could potentially recurse down through the last element in the
    // sub-struct to find a natural end point.
    if (SL->getElementOffset(EndIndex) != EndOffset)
      return 0;

    assert(Index < EndIndex);
    EE = STy->element_begin() + EndIndex;
  }

  // Try to build up a sub-structure.
  StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE),
                                      STy->isPacked());
  const StructLayout *SubSL = DL.getStructLayout(SubTy);
  if (Size != SubSL->getSizeInBytes())
    return 0; // The sub-struct doesn't have quite the size needed.

  return SubTy;
}
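
// For illustration (hypothetical types and layout): given a struct laid out
// as { i32, [4 x i16], i8 }, a query with Offset = 4 and Size = 8 lands
// exactly on the [4 x i16] element and the struct path above returns that
// array type, while Offset = 4 and Size = 4 recurses into the array and
// yields a [2 x i16] sub-array.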

/// \brief Rewrite an alloca partition's users.
///
/// This routine drives both of the rewriting goals of the SROA pass. It tries
/// to rewrite uses of an alloca partition to be conducive for SSA value
/// promotion. If the partition needs a new, more refined alloca, this will
/// build that new alloca, preserving as much type information as possible, and
/// rewrite the uses of the old alloca to point at the new one with the
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and, if it was successful, queues the alloca to be
/// promoted.
bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
                            AllocaSlices::iterator B, AllocaSlices::iterator E,
                            int64_t BeginOffset, int64_t EndOffset,
                            ArrayRef<AllocaSlices::iterator> SplitUses) {
  assert(BeginOffset < EndOffset);
  uint64_t SliceSize = EndOffset - BeginOffset;

  // Try to compute a friendly type for this partition of the alloca. This
  // won't always succeed, in which case we fall back to a legal integer type
  // or an i8 array of an appropriate size.
  Type *SliceTy = 0;
  if (Type *CommonUseTy = findCommonType(B, E, EndOffset))
    if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize)
      SliceTy = CommonUseTy;
  if (!SliceTy)
    if (Type *TypePartitionTy = getTypePartition(*DL, AI.getAllocatedType(),
                                                 BeginOffset, SliceSize))
      SliceTy = TypePartitionTy;
  if ((!SliceTy || (SliceTy->isArrayTy() &&
                    SliceTy->getArrayElementType()->isIntegerTy())) &&
      DL->isLegalInteger(SliceSize * 8))
    SliceTy = Type::getIntNTy(*C, SliceSize * 8);
  if (!SliceTy)
    SliceTy = ArrayType::get(Type::getInt8Ty(*C), SliceSize);
  assert(DL->getTypeAllocSize(SliceTy) >= SliceSize);
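
  // For illustration (hypothetical sizes): a 4-byte partition with no common
  // use type becomes i32 on targets where i32 is a legal integer type, while
  // an 11-byte partition with no friendly type falls back to [11 x i8].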

  bool IsVectorPromotable = isVectorPromotionViable(
      *DL, SliceTy, S, BeginOffset, EndOffset, B, E, SplitUses);

  bool IsIntegerPromotable =
      !IsVectorPromotable &&
      isIntegerWideningViable(*DL, SliceTy, BeginOffset, S, B, E, SplitUses);

  // Check for the case where we're going to rewrite to a new alloca of the
  // exact same type as the original, and with the same access offsets. In that
  // case, re-use the existing alloca, but still run through the rewriter to
  // perform phi and select speculation.
  AllocaInst *NewAI;
  if (SliceTy == AI.getAllocatedType()) {
    assert(BeginOffset == 0 &&
           "Non-zero begin offset but same alloca type");
    NewAI = &AI;
    // FIXME: We should be able to bail at this point with "nothing changed".
    // FIXME: We might want to defer PHI speculation until after here.
  } else {
    unsigned Alignment = AI.getAlignment();
    if (!Alignment) {
      // The minimum alignment which users can rely on when the explicit
      // alignment is omitted or zero is that required by the ABI for this
      // type.
      Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
    }
    Alignment = MinAlign(Alignment, BeginOffset);
    // If we will get at least this much alignment from the type alone, leave
    // the alloca's alignment unconstrained.
    if (Alignment <= DL->getABITypeAlignment(SliceTy))
      Alignment = 0;
    NewAI = new AllocaInst(SliceTy, 0, Alignment,
                           AI.getName() + ".sroa." + Twine(B - S.begin()), &AI);
    ++NumNewAllocas;
  }

  DEBUG(dbgs() << "Rewriting alloca partition "
               << "[" << BeginOffset << "," << EndOffset << ") to: " << *NewAI
               << "\n");

  // Track the high watermark on several worklists that are only relevant for
  // promoted allocas. We will reset it to this point if the alloca is not in
  // fact scheduled for promotion.
  unsigned PPWOldSize = PostPromotionWorklist.size();
  unsigned SPOldSize = SpeculatablePHIs.size();
  unsigned SSOldSize = SpeculatableSelects.size();
  unsigned NumUses = 0;

  AllocaSliceRewriter Rewriter(*DL, S, *this, AI, *NewAI, BeginOffset,
                               EndOffset, IsVectorPromotable,
                               IsIntegerPromotable);
  bool Promotable = true;
  for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
                                                        SUE = SplitUses.end();
       SUI != SUE; ++SUI) {
    DEBUG(dbgs() << "  rewriting split ");
    DEBUG(S.printSlice(dbgs(), *SUI, ""));
    Promotable &= Rewriter.visit(*SUI);
    ++NumUses;
  }
  for (AllocaSlices::iterator I = B; I != E; ++I) {
    DEBUG(dbgs() << "  rewriting ");
    DEBUG(S.printSlice(dbgs(), I, ""));
    Promotable &= Rewriter.visit(I);
    ++NumUses;
  }

  NumAllocaPartitionUses += NumUses;
  MaxUsesPerAllocaPartition =
      std::max<unsigned>(NumUses, MaxUsesPerAllocaPartition);

  if (Promotable && !Rewriter.isUsedByRewrittenSpeculatableInstructions()) {
    DEBUG(dbgs() << "  and queuing for promotion\n");
    PromotableAllocas.push_back(NewAI);
  } else if (NewAI != &AI ||
             (Promotable &&
              Rewriter.isUsedByRewrittenSpeculatableInstructions())) {
    // If we can't promote the alloca, iterate on it to check for new
    // refinements exposed by splitting the current alloca. Don't iterate on an
    // alloca which didn't actually change and didn't get promoted.
    //
    // Alternatively, if we could promote the alloca but have speculatable
    // instructions then we will speculate them after finishing our processing
    // of the original alloca. Mark the new one for re-visiting in the next
    // iteration so the speculated operations can be rewritten.
    //
    // FIXME: We should actually track whether the rewriter changed anything.
    Worklist.insert(NewAI);
  }

  // Drop any post-promotion work items if promotion didn't happen.
  if (!Promotable) {
    while (PostPromotionWorklist.size() > PPWOldSize)
      PostPromotionWorklist.pop_back();
    while (SpeculatablePHIs.size() > SPOldSize)
      SpeculatablePHIs.pop_back();
    while (SpeculatableSelects.size() > SSOldSize)
      SpeculatableSelects.pop_back();
  }

  return true;
}

namespace {
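/// \brief Predicate for slices whose end offset is at or before an upper
/// bound.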
struct IsSliceEndLessOrEqualTo {
  uint64_t UpperBound;

  IsSliceEndLessOrEqualTo(uint64_t UpperBound) : UpperBound(UpperBound) {}

  bool operator()(const AllocaSlices::iterator &I) {
    return I->endOffset() <= UpperBound;
  }
};
}

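/// \brief Remove finished split uses from \p SplitUses.
///
/// Drops any slices that end at or before \p Offset and recomputes
/// \p MaxSplitUseEndOffset for those that remain.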
static void
removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
                        uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
  if (Offset >= MaxSplitUseEndOffset) {
    SplitUses.clear();
    MaxSplitUseEndOffset = 0;
    return;
  }

  size_t SplitUsesOldSize = SplitUses.size();
  SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
                                 IsSliceEndLessOrEqualTo(Offset)),
                  SplitUses.end());
  if (SplitUsesOldSize == SplitUses.size())
    return;

  // Recompute the max. While this is linear, so is remove_if.
  MaxSplitUseEndOffset = 0;
  for (SmallVectorImpl<AllocaSlices::iterator>::iterator
           SUI = SplitUses.begin(),
           SUE = SplitUses.end();
       SUI != SUE; ++SUI)
    MaxSplitUseEndOffset = std::max((*SUI)->endOffset(), MaxSplitUseEndOffset);
}

/// \brief Walks the slices of an alloca and forms partitions based on them,
/// rewriting each of their uses.
bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) {
  if (S.begin() == S.end())
    return false;

  unsigned NumPartitions = 0;
  bool Changed = false;
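  // Splittable slices that extend beyond the current partition are carried in
  // SplitUses; MaxSplitUseEndOffset tracks the farthest end offset among them.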
  SmallVector<AllocaSlices::iterator, 4> SplitUses;
  uint64_t MaxSplitUseEndOffset = 0;

  uint64_t BeginOffset = S.begin()->beginOffset();

  for (AllocaSlices::iterator SI = S.begin(), SJ = llvm::next(SI), SE = S.end();
       SI != SE; SI = SJ) {
    uint64_t MaxEndOffset = SI->endOffset();

    if (!SI->isSplittable()) {
      // When we're forming an unsplittable region, it must always start at the
      // first slice and will extend through its end.
      assert(BeginOffset == SI->beginOffset());

      // Form a partition including all of the overlapping slices with this
      // unsplittable slice.
      while (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
        if (!SJ->isSplittable())
          MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
        ++SJ;
      }
    } else {
      assert(SI->isSplittable()); // Established above.

      // Collect all of the overlapping splittable slices.
      while (SJ != SE && SJ->beginOffset() < MaxEndOffset &&
             SJ->isSplittable()) {
        MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
        ++SJ;
      }

      // Back up MaxEndOffset and SJ if we ended the span early when
      // encountering an unsplittable slice.
      if (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
        assert(!SJ->isSplittable());
        MaxEndOffset = SJ->beginOffset();
      }
    }

    // Check if we have managed to move the end offset forward yet. If so,
    // we'll have to rewrite uses and erase old split uses.
    if (BeginOffset < MaxEndOffset) {
      // Rewrite a sequence of overlapping slices.
      Changed |=
          rewritePartition(AI, S, SI, SJ, BeginOffset, MaxEndOffset, SplitUses);
      ++NumPartitions;

      removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset, MaxEndOffset);
    }

    // Accumulate all the splittable slices from the [SI,SJ) region which
    // overlap going forward.
    for (AllocaSlices::iterator SK = SI; SK != SJ; ++SK)
      if (SK->isSplittable() && SK->endOffset() > MaxEndOffset) {
        SplitUses.push_back(SK);
        MaxSplitUseEndOffset = std::max(SK->endOffset(), MaxSplitUseEndOffset);
      }

    // If we're already at the end and we have no split uses, we're done.
    if (SJ == SE && SplitUses.empty())
      break;

    // If we have no split uses or no gap in offsets, we're ready to move to
    // the next slice.
    if (SplitUses.empty() || (SJ != SE && MaxEndOffset == SJ->beginOffset())) {
      BeginOffset = SJ->beginOffset();
      continue;
    }

    // Even if we have split slices, if the next slice is splittable and the
    // split slices reach it, we can simply set up the beginning offset of the
    // next iteration to bridge between them.
    if (SJ != SE && SJ->isSplittable() &&
        MaxSplitUseEndOffset > SJ->beginOffset()) {
      BeginOffset = MaxEndOffset;
      continue;
    }

    // Otherwise, we have a tail of split slices. Rewrite them with an empty
    // range of slices.
    uint64_t PostSplitEndOffset =
        SJ == SE ? MaxSplitUseEndOffset : SJ->beginOffset();

    Changed |= rewritePartition(AI, S, SJ, SJ, MaxEndOffset, PostSplitEndOffset,
                                SplitUses);
    ++NumPartitions;

    if (SJ == SE)
      break; // Skip the rest, we don't need to do any cleanup.

    removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset,
                            PostSplitEndOffset);

    // Now just reset the begin offset for the next iteration.
    BeginOffset = SJ->beginOffset();
  }

  NumAllocaPartitions += NumPartitions;
  MaxPartitionsPerAlloca =
      std::max<unsigned>(NumPartitions, MaxPartitionsPerAlloca);

  return Changed;
}
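
// For illustration (hypothetical slices): an alloca covered by a splittable
// memcpy slice [0,8) and unsplittable loads [0,4) and [4,8) is carved into
// two partitions, [0,4) and [4,8). The memcpy slice overlaps both, so it is
// rewritten against each partition it spans, reaching the later one via
// SplitUses.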

/// \brief Clobber a use with undef, deleting the used value if it becomes dead.
void SROA::clobberUse(Use &U) {
  Value *OldV = U;
  // Replace the use with an undef value.
  U = UndefValue::get(OldV->getType());

  // Check for this making an instruction dead. We have to garbage collect
  // all the dead instructions to ensure the uses of any alloca end up being
  // minimal.
  if (Instruction *OldI = dyn_cast<Instruction>(OldV))
    if (isInstructionTriviallyDead(OldI)) {
      DeadInsts.insert(OldI);
    }
}

/// \brief Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
bool SROA::runOnAlloca(AllocaInst &AI) {
  DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
  ++NumAllocasAnalyzed;

  // Special case dead allocas, as they're trivial.
  if (AI.use_empty()) {
    AI.eraseFromParent();
    return true;
  }

  // Skip alloca forms that this analysis can't handle.
  if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
      DL->getTypeAllocSize(AI.getAllocatedType()) == 0)
    return false;

  bool Changed = false;

  // First, split any FCA (first-class aggregate) loads and stores touching
  // this alloca to expose better splitting and promotion opportunities.
  AggLoadStoreRewriter AggRewriter(*DL);
  Changed |= AggRewriter.rewrite(AI);

  // Build the slices using a recursive instruction-visiting builder.
  AllocaSlices S(*DL, AI);
  DEBUG(S.print(dbgs()));
  if (S.isEscaped())
    return Changed;

  // Delete all the dead users of this alloca before splitting and rewriting it.
  for (AllocaSlices::dead_user_iterator DI = S.dead_user_begin(),
                                        DE = S.dead_user_end();
       DI != DE; ++DI) {
    // Free up everything used by this instruction.
    for (User::op_iterator DOI = (*DI)->op_begin(), DOE = (*DI)->op_end();
         DOI != DOE; ++DOI)
      clobberUse(*DOI);

    // Now replace the uses of this instruction.
    (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));

    // And mark it for deletion.
    DeadInsts.insert(*DI);
    Changed = true;
  }
  for (AllocaSlices::dead_op_iterator DO = S.dead_op_begin(),
                                      DE = S.dead_op_end();
       DO != DE; ++DO) {
    clobberUse(**DO);
    Changed = true;
  }

  // No slices to split. Leave the dead alloca for a later pass to clean up.
  if (S.begin() == S.end())
    return Changed;

  Changed |= splitAlloca(AI, S);

  DEBUG(dbgs() << "  Speculating PHIs\n");
  while (!SpeculatablePHIs.empty())
    speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());

  DEBUG(dbgs() << "  Speculating Selects\n");
  while (!SpeculatableSelects.empty())
    speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());

  return Changed;
}

/// \brief Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
  while (!DeadInsts.empty()) {
    Instruction *I = DeadInsts.pop_back_val();
    DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");

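    // Detach any remaining uses by replacing them with undef so that erasing
    // this instruction is safe.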
    I->replaceAllUsesWith(UndefValue::get(I->getType()));

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        // Zero out the operand and see if it becomes trivially dead.
        *OI = 0;
        if (isInstructionTriviallyDead(U))
          DeadInsts.insert(U);
      }

    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeletedAllocas.insert(AI);

    ++NumDeleted;
    I->eraseFromParent();
  }
}

static void enqueueUsersInWorklist(Instruction &I,
                                   SmallVectorImpl<Instruction *> &Worklist,
                                   SmallPtrSet<Instruction *, 8> &Visited) {
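  // The Visited set ensures each user is enqueued only once, even when it
  // uses I through more than one operand.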
  for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
       ++UI)
    if (Visited.insert(cast<Instruction>(*UI)))
      Worklist.push_back(cast<Instruction>(*UI));
}

/// \brief Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// If there is a domtree available, we attempt to promote using the full power
/// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is
/// based on the SSAUpdater utilities. This function returns whether any
/// promotion occurred.
bool SROA::promoteAllocas(Function &F) {
  if (PromotableAllocas.empty())
    return false;

  NumPromoted += PromotableAllocas.size();

  if (DT && !ForceSSAUpdater) {
    DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
    PromoteMemToReg(PromotableAllocas, *DT);
    PromotableAllocas.clear();
    return true;
  }

  DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
  SSAUpdater SSA;
  DIBuilder DIB(*F.getParent());
  SmallVector<Instruction *, 64> Insts;

  // We need a worklist to walk the uses of each alloca.
  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
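  // No-op bitcasts and GEPs stripped during the walk below; they are erased
  // only after the alloca's loads and stores have been promoted.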
  SmallVector<Instruction *, 32> DeadInsts;

  for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
    AllocaInst *AI = PromotableAllocas[Idx];
    Insts.clear();
    Worklist.clear();
    Visited.clear();

    enqueueUsersInWorklist(*AI, Worklist, Visited);

    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      // FIXME: Currently the SSAUpdater infrastructure doesn't reason about
      // lifetime intrinsics and so we strip them (and the bitcasts+GEPs
      // leading to them) here. Eventually it should use them to optimize the
      // scalar values produced.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
               II->getIntrinsicID() == Intrinsic::lifetime_end);
        II->eraseFromParent();
        continue;
      }

      // Push the loads and stores we find onto the list. SROA will already
      // have validated that all loads and stores are viable candidates for
      // promotion.
      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        assert(LI->getType() == AI->getAllocatedType());
        Insts.push_back(LI);
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        assert(SI->getValueOperand()->getType() == AI->getAllocatedType());
        Insts.push_back(SI);
        continue;
      }

      // For everything else, we know that only no-op bitcasts and GEPs will
      // make it this far, just recurse through them and recall them for later
      // removal.
      DeadInsts.push_back(I);
      enqueueUsersInWorklist(*I, Worklist, Visited);
    }
    AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
    while (!DeadInsts.empty())
      DeadInsts.pop_back_val()->eraseFromParent();
    AI->eraseFromParent();
  }

  PromotableAllocas.clear();
  return true;
}

namespace {
  /// \brief A predicate to test whether an alloca belongs to a set.
  class IsAllocaInSet {
    typedef SmallPtrSet<AllocaInst *, 4> SetType;
    const SetType &Set;

  public:
    typedef AllocaInst *argument_type;

    IsAllocaInSet(const SetType &Set) : Set(Set) {}
    bool operator()(AllocaInst *AI) const { return Set.count(AI); }
  };
}

bool SROA::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
  C = &F.getContext();
  DL = getAnalysisIfAvailable<DataLayout>();
  if (!DL) {
    DEBUG(dbgs() << "  Skipping SROA -- no target data!\n");
    return false;
  }
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : 0;

  BasicBlock &EntryBB = F.getEntryBlock();
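  // Seed the worklist with every alloca in the entry block, stopping short of
  // the block's terminator.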
  for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
       I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      Worklist.insert(AI);

  bool Changed = false;
  // A set of deleted alloca instruction pointers which should be removed from
  // the list of promotable allocas.
  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;

  do {
    while (!Worklist.empty()) {
      Changed |= runOnAlloca(*Worklist.pop_back_val());
      deleteDeadInstructions(DeletedAllocas);

      // Remove the deleted allocas from various lists so that we don't try to
      // continue processing them.
      if (!DeletedAllocas.empty()) {
        Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
        PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
        PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
                                               PromotableAllocas.end(),
                                               IsAllocaInSet(DeletedAllocas)),
                                PromotableAllocas.end());
        DeletedAllocas.clear();
      }
    }

    Changed |= promoteAllocas(F);

    Worklist = PostPromotionWorklist;
    PostPromotionWorklist.clear();
  } while (!Worklist.empty());

  return Changed;
}

void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
  if (RequiresDomTree)
    AU.addRequired<DominatorTreeWrapperPass>();
  AU.setPreservesCFG();
}