blob: ab6f7299e2734ef1db13c6acea251008f40b1453 [file] [log] [blame]
Chris Lattnered7b41e2003-05-27 15:45:27 +00001//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
Misha Brukmanfd939082005-04-21 23:48:37 +00002//
John Criswellb576c942003-10-20 19:43:21 +00003// The LLVM Compiler Infrastructure
4//
Chris Lattner4ee451d2007-12-29 20:36:04 +00005// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
Misha Brukmanfd939082005-04-21 23:48:37 +00007//
John Criswellb576c942003-10-20 19:43:21 +00008//===----------------------------------------------------------------------===//
Chris Lattnered7b41e2003-05-27 15:45:27 +00009//
10// This transformation implements the well known scalar replacement of
11// aggregates transformation. This xform breaks up alloca instructions of
12// aggregate type (structure or array) into individual alloca instructions for
Chris Lattner38aec322003-09-11 16:45:55 +000013// each member (if possible). Then, if possible, it transforms the individual
14// alloca instructions into nice clean scalar SSA form.
15//
16// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
17// often interact, especially for C++ programs. As such, iterating between
18// SRoA, then Mem2Reg until we run out of things to promote works well.
Chris Lattnered7b41e2003-05-27 15:45:27 +000019//
20//===----------------------------------------------------------------------===//
21
Chris Lattner0e5f4992006-12-19 21:40:18 +000022#define DEBUG_TYPE "scalarrepl"
Chris Lattnered7b41e2003-05-27 15:45:27 +000023#include "llvm/Transforms/Scalar.h"
Chris Lattner38aec322003-09-11 16:45:55 +000024#include "llvm/Constants.h"
25#include "llvm/DerivedTypes.h"
Chris Lattnered7b41e2003-05-27 15:45:27 +000026#include "llvm/Function.h"
Chris Lattner79b3bd32007-04-25 06:40:51 +000027#include "llvm/GlobalVariable.h"
Misha Brukmand8e1eea2004-07-29 17:05:13 +000028#include "llvm/Instructions.h"
Chris Lattner372dda82007-03-05 07:52:57 +000029#include "llvm/IntrinsicInst.h"
Owen Andersonfa5cbd62009-07-03 19:42:02 +000030#include "llvm/LLVMContext.h"
Chris Lattner372dda82007-03-05 07:52:57 +000031#include "llvm/Pass.h"
Chris Lattner38aec322003-09-11 16:45:55 +000032#include "llvm/Analysis/Dominators.h"
33#include "llvm/Target/TargetData.h"
34#include "llvm/Transforms/Utils/PromoteMemToReg.h"
Devang Patel4afc90d2009-02-10 07:00:59 +000035#include "llvm/Transforms/Utils/Local.h"
Chris Lattner95255282006-06-28 23:17:24 +000036#include "llvm/Support/Debug.h"
Torok Edwin7d696d82009-07-11 13:10:19 +000037#include "llvm/Support/ErrorHandling.h"
Chris Lattnera1888942005-12-12 07:19:13 +000038#include "llvm/Support/GetElementPtrTypeIterator.h"
Chris Lattner65a65022009-02-03 19:41:50 +000039#include "llvm/Support/IRBuilder.h"
Chris Lattnera1888942005-12-12 07:19:13 +000040#include "llvm/Support/MathExtras.h"
Chris Lattnerbdff5482009-08-23 04:37:46 +000041#include "llvm/Support/raw_ostream.h"
Chris Lattner1ccd1852007-02-12 22:56:41 +000042#include "llvm/ADT/SmallVector.h"
Reid Spencer551ccae2004-09-01 22:55:40 +000043#include "llvm/ADT/Statistic.h"
Chris Lattnerd8664732003-12-02 17:43:55 +000044using namespace llvm;
Brian Gaeked0fde302003-11-11 22:41:34 +000045
Chris Lattner0e5f4992006-12-19 21:40:18 +000046STATISTIC(NumReplaced, "Number of allocas broken up");
47STATISTIC(NumPromoted, "Number of allocas promoted");
48STATISTIC(NumConverted, "Number of aggregates converted to scalar");
Chris Lattner79b3bd32007-04-25 06:40:51 +000049STATISTIC(NumGlobals, "Number of allocas copied from constant global");
Chris Lattnered7b41e2003-05-27 15:45:27 +000050
Chris Lattner0e5f4992006-12-19 21:40:18 +000051namespace {
Chris Lattner3e8b6632009-09-02 06:11:42 +000052 struct SROA : public FunctionPass {
Nick Lewyckyecd94c82007-05-06 13:37:16 +000053 static char ID; // Pass identification, replacement for typeid
Dan Gohmanae73dc12008-09-04 17:05:41 +000054 explicit SROA(signed T = -1) : FunctionPass(&ID) {
Devang Patelff366852007-07-09 21:19:23 +000055 if (T == -1)
Chris Lattnerb0e71ed2007-08-02 21:33:36 +000056 SRThreshold = 128;
Devang Patelff366852007-07-09 21:19:23 +000057 else
58 SRThreshold = T;
59 }
Devang Patel794fd752007-05-01 21:15:47 +000060
Chris Lattnered7b41e2003-05-27 15:45:27 +000061 bool runOnFunction(Function &F);
62
Chris Lattner38aec322003-09-11 16:45:55 +000063 bool performScalarRepl(Function &F);
64 bool performPromotion(Function &F);
65
Chris Lattnera15854c2003-08-31 00:45:13 +000066 // getAnalysisUsage - This pass does not require any passes, but we know it
67 // will not alter the CFG, so say so.
68 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
Devang Patel326821e2007-06-07 21:57:03 +000069 AU.addRequired<DominatorTree>();
Chris Lattner38aec322003-09-11 16:45:55 +000070 AU.addRequired<DominanceFrontier>();
Chris Lattnera15854c2003-08-31 00:45:13 +000071 AU.setPreservesCFG();
72 }
73
Chris Lattnered7b41e2003-05-27 15:45:27 +000074 private:
Chris Lattner56c38522009-01-07 06:34:28 +000075 TargetData *TD;
76
Bob Wilsonb742def2009-12-18 20:14:40 +000077 /// DeadInsts - Keep track of instructions we have made dead, so that
78 /// we can remove them after we are done working.
79 SmallVector<Value*, 32> DeadInsts;
80
Chris Lattner39a1c042007-05-30 06:11:23 +000081 /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
82 /// information about the uses. All these fields are initialized to false
83 /// and set to true when something is learned.
84 struct AllocaInfo {
85 /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
86 bool isUnsafe : 1;
87
Devang Patel4afc90d2009-02-10 07:00:59 +000088 /// needsCleanup - This is set to true if there is some use of the alloca
89 /// that requires cleanup.
90 bool needsCleanup : 1;
Chris Lattner39a1c042007-05-30 06:11:23 +000091
92 /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
93 bool isMemCpySrc : 1;
94
Zhou Sheng33b0b8d2007-07-06 06:01:16 +000095 /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
Chris Lattner39a1c042007-05-30 06:11:23 +000096 bool isMemCpyDst : 1;
97
98 AllocaInfo()
Devang Patel4afc90d2009-02-10 07:00:59 +000099 : isUnsafe(false), needsCleanup(false),
Chris Lattner39a1c042007-05-30 06:11:23 +0000100 isMemCpySrc(false), isMemCpyDst(false) {}
101 };
102
Devang Patelff366852007-07-09 21:19:23 +0000103 unsigned SRThreshold;
104
Chris Lattner39a1c042007-05-30 06:11:23 +0000105 void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }
106
Victor Hernandez7b929da2009-10-23 21:09:37 +0000107 int isSafeAllocaToScalarRepl(AllocaInst *AI);
Chris Lattner39a1c042007-05-30 06:11:23 +0000108
Bob Wilsonb742def2009-12-18 20:14:40 +0000109 void isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
110 uint64_t ArrayOffset, AllocaInfo &Info);
111 void isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t &Offset,
112 uint64_t &ArrayOffset, AllocaInfo &Info);
113 void isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t ArrayOffset,
114 uint64_t MemSize, const Type *MemOpType, bool isStore,
115 AllocaInfo &Info);
116 bool TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size);
117 unsigned FindElementAndOffset(const Type *&T, uint64_t &Offset);
Chris Lattner39a1c042007-05-30 06:11:23 +0000118
Victor Hernandez7b929da2009-10-23 21:09:37 +0000119 void DoScalarReplacement(AllocaInst *AI,
120 std::vector<AllocaInst*> &WorkList);
Bob Wilsonb742def2009-12-18 20:14:40 +0000121 void DeleteDeadInstructions();
Devang Patel4afc90d2009-02-10 07:00:59 +0000122 void CleanupGEP(GetElementPtrInst *GEP);
Bob Wilsonb742def2009-12-18 20:14:40 +0000123 void CleanupAllocaUsers(Value *V);
Victor Hernandez7b929da2009-10-23 21:09:37 +0000124 AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocaInst *Base);
Chris Lattnera1888942005-12-12 07:19:13 +0000125
Bob Wilsonb742def2009-12-18 20:14:40 +0000126 void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
127 SmallVector<AllocaInst*, 32> &NewElts);
128 void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
129 SmallVector<AllocaInst*, 32> &NewElts);
130 void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
131 SmallVector<AllocaInst*, 32> &NewElts);
132 void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
Victor Hernandez7b929da2009-10-23 21:09:37 +0000133 AllocaInst *AI,
Chris Lattnerd93afec2009-01-07 07:18:45 +0000134 SmallVector<AllocaInst*, 32> &NewElts);
Victor Hernandez7b929da2009-10-23 21:09:37 +0000135 void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
Chris Lattnerd2fa7812009-01-07 08:11:13 +0000136 SmallVector<AllocaInst*, 32> &NewElts);
Victor Hernandez7b929da2009-10-23 21:09:37 +0000137 void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
Chris Lattner6e733d32009-01-28 20:16:43 +0000138 SmallVector<AllocaInst*, 32> &NewElts);
Chris Lattnerd93afec2009-01-07 07:18:45 +0000139
Chris Lattner7809ecd2009-02-03 01:30:09 +0000140 bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
Chris Lattner1a3257b2009-02-03 18:15:05 +0000141 bool &SawVec, uint64_t Offset, unsigned AllocaSize);
Chris Lattner2e0d5f82009-01-31 02:28:54 +0000142 void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
Chris Lattner6e011152009-02-03 21:01:03 +0000143 Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
Chris Lattner9bc67da2009-02-03 19:45:44 +0000144 uint64_t Offset, IRBuilder<> &Builder);
Chris Lattner9b872db2009-02-03 19:30:11 +0000145 Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
Chris Lattner65a65022009-02-03 19:41:50 +0000146 uint64_t Offset, IRBuilder<> &Builder);
Victor Hernandez7b929da2009-10-23 21:09:37 +0000147 static Instruction *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
Chris Lattnered7b41e2003-05-27 15:45:27 +0000148 };
Chris Lattnered7b41e2003-05-27 15:45:27 +0000149}
150
Dan Gohman844731a2008-05-13 00:00:25 +0000151char SROA::ID = 0;
152static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
153
Brian Gaeked0fde302003-11-11 22:41:34 +0000154// Public interface to the ScalarReplAggregates pass
Devang Patelff366852007-07-09 21:19:23 +0000155FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
156 return new SROA(Threshold);
157}
Chris Lattnered7b41e2003-05-27 15:45:27 +0000158
159
Chris Lattnered7b41e2003-05-27 15:45:27 +0000160bool SROA::runOnFunction(Function &F) {
Dan Gohmane4af1cf2009-08-19 18:22:18 +0000161 TD = getAnalysisIfAvailable<TargetData>();
162
Chris Lattnerfe7ea0d2003-09-12 15:36:03 +0000163 bool Changed = performPromotion(F);
Dan Gohmane4af1cf2009-08-19 18:22:18 +0000164
165 // FIXME: ScalarRepl currently depends on TargetData more than it
166 // theoretically needs to. It should be refactored in order to support
167 // target-independent IR. Until this is done, just skip the actual
168 // scalar-replacement portion of this pass.
169 if (!TD) return Changed;
170
Chris Lattnerfe7ea0d2003-09-12 15:36:03 +0000171 while (1) {
172 bool LocalChange = performScalarRepl(F);
173 if (!LocalChange) break; // No need to repromote if no scalarrepl
174 Changed = true;
175 LocalChange = performPromotion(F);
176 if (!LocalChange) break; // No need to re-scalarrepl if no promotion
177 }
Chris Lattner38aec322003-09-11 16:45:55 +0000178
179 return Changed;
180}
181
182
183bool SROA::performPromotion(Function &F) {
184 std::vector<AllocaInst*> Allocas;
Devang Patel326821e2007-06-07 21:57:03 +0000185 DominatorTree &DT = getAnalysis<DominatorTree>();
Chris Lattner43f820d2003-10-05 21:20:13 +0000186 DominanceFrontier &DF = getAnalysis<DominanceFrontier>();
Chris Lattner38aec322003-09-11 16:45:55 +0000187
Chris Lattner02a3be02003-09-20 14:39:18 +0000188 BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function
Chris Lattner38aec322003-09-11 16:45:55 +0000189
Chris Lattnerfe7ea0d2003-09-12 15:36:03 +0000190 bool Changed = false;
Misha Brukmanfd939082005-04-21 23:48:37 +0000191
Chris Lattner38aec322003-09-11 16:45:55 +0000192 while (1) {
193 Allocas.clear();
194
195 // Find allocas that are safe to promote, by looking at all instructions in
196 // the entry node
197 for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
198 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca?
Devang Patel41968df2007-04-25 17:15:20 +0000199 if (isAllocaPromotable(AI))
Chris Lattner38aec322003-09-11 16:45:55 +0000200 Allocas.push_back(AI);
201
202 if (Allocas.empty()) break;
203
Nick Lewyckyce2c51b2009-11-23 03:50:44 +0000204 PromoteMemToReg(Allocas, DT, DF);
Chris Lattner38aec322003-09-11 16:45:55 +0000205 NumPromoted += Allocas.size();
206 Changed = true;
207 }
208
209 return Changed;
210}
211
Chris Lattner963a97f2008-06-22 17:46:21 +0000212/// getNumSAElements - Return the number of elements in the specific struct or
213/// array.
214static uint64_t getNumSAElements(const Type *T) {
215 if (const StructType *ST = dyn_cast<StructType>(T))
216 return ST->getNumElements();
217 return cast<ArrayType>(T)->getNumElements();
218}
219
Chris Lattner38aec322003-09-11 16:45:55 +0000220// performScalarRepl - This algorithm is a simple worklist driven algorithm,
221// which runs on all of the malloc/alloca instructions in the function, removing
222// them if they are only used by getelementptr instructions.
223//
224bool SROA::performScalarRepl(Function &F) {
Victor Hernandez7b929da2009-10-23 21:09:37 +0000225 std::vector<AllocaInst*> WorkList;
Chris Lattnered7b41e2003-05-27 15:45:27 +0000226
227 // Scan the entry basic block, adding any alloca's and mallocs to the worklist
Chris Lattner02a3be02003-09-20 14:39:18 +0000228 BasicBlock &BB = F.getEntryBlock();
Chris Lattnered7b41e2003-05-27 15:45:27 +0000229 for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
Victor Hernandez7b929da2009-10-23 21:09:37 +0000230 if (AllocaInst *A = dyn_cast<AllocaInst>(I))
Chris Lattnered7b41e2003-05-27 15:45:27 +0000231 WorkList.push_back(A);
232
233 // Process the worklist
234 bool Changed = false;
235 while (!WorkList.empty()) {
Victor Hernandez7b929da2009-10-23 21:09:37 +0000236 AllocaInst *AI = WorkList.back();
Chris Lattnered7b41e2003-05-27 15:45:27 +0000237 WorkList.pop_back();
Chris Lattnera1888942005-12-12 07:19:13 +0000238
Chris Lattneradd2bd72006-12-22 23:14:42 +0000239 // Handle dead allocas trivially. These can be formed by SROA'ing arrays
240 // with unused elements.
241 if (AI->use_empty()) {
242 AI->eraseFromParent();
243 continue;
244 }
Chris Lattner7809ecd2009-02-03 01:30:09 +0000245
246 // If this alloca is impossible for us to promote, reject it early.
247 if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
248 continue;
Chris Lattner79b3bd32007-04-25 06:40:51 +0000249
250 // Check to see if this allocation is only modified by a memcpy/memmove from
251 // a constant global. If this is the case, we can change all users to use
252 // the constant global instead. This is commonly produced by the CFE by
253 // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
254 // is only subsequently read.
255 if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
Nick Lewycky59136252009-09-15 07:08:25 +0000256 DEBUG(errs() << "Found alloca equal to global: " << *AI << '\n');
257 DEBUG(errs() << " memcpy = " << *TheCopy << '\n');
Chris Lattner79b3bd32007-04-25 06:40:51 +0000258 Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
Owen Andersonbaf3c402009-07-29 18:55:55 +0000259 AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
Chris Lattner79b3bd32007-04-25 06:40:51 +0000260 TheCopy->eraseFromParent(); // Don't mutate the global.
261 AI->eraseFromParent();
262 ++NumGlobals;
263 Changed = true;
264 continue;
265 }
Chris Lattner15c82772009-02-02 20:44:45 +0000266
Chris Lattner7809ecd2009-02-03 01:30:09 +0000267 // Check to see if we can perform the core SROA transformation. We cannot
268 // transform the allocation instruction if it is an array allocation
269 // (allocations OF arrays are ok though), and an allocation of a scalar
270 // value cannot be decomposed at all.
Duncan Sands777d2302009-05-09 07:06:46 +0000271 uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
Bill Wendling5a377cb2009-03-03 12:12:58 +0000272
Nick Lewyckyd3aa25e2009-08-17 05:37:31 +0000273 // Do not promote [0 x %struct].
274 if (AllocaSize == 0) continue;
275
Bill Wendling5a377cb2009-03-03 12:12:58 +0000276 // Do not promote any struct whose size is too big.
Bill Wendling3aaf5d92009-03-03 19:18:49 +0000277 if (AllocaSize > SRThreshold) continue;
Nick Lewyckyd3aa25e2009-08-17 05:37:31 +0000278
Chris Lattner7809ecd2009-02-03 01:30:09 +0000279 if ((isa<StructType>(AI->getAllocatedType()) ||
280 isa<ArrayType>(AI->getAllocatedType())) &&
Chris Lattner7809ecd2009-02-03 01:30:09 +0000281 // Do not promote any struct into more than "32" separate vars.
Evan Cheng67fca632009-03-06 00:56:43 +0000282 getNumSAElements(AI->getAllocatedType()) <= SRThreshold/4) {
Chris Lattner7809ecd2009-02-03 01:30:09 +0000283 // Check that all of the users of the allocation are capable of being
284 // transformed.
285 switch (isSafeAllocaToScalarRepl(AI)) {
Torok Edwinc23197a2009-07-14 16:55:14 +0000286 default: llvm_unreachable("Unexpected value!");
Chris Lattner7809ecd2009-02-03 01:30:09 +0000287 case 0: // Not safe to scalar replace.
288 break;
289 case 1: // Safe, but requires cleanup/canonicalizations first
Devang Patel4afc90d2009-02-10 07:00:59 +0000290 CleanupAllocaUsers(AI);
Chris Lattner7809ecd2009-02-03 01:30:09 +0000291 // FALL THROUGH.
292 case 3: // Safe to scalar replace.
293 DoScalarReplacement(AI, WorkList);
294 Changed = true;
295 continue;
296 }
297 }
Chris Lattner6e733d32009-01-28 20:16:43 +0000298
299 // If we can turn this aggregate value (potentially with casts) into a
300 // simple scalar value that can be mem2reg'd into a register value.
Chris Lattner2e0d5f82009-01-31 02:28:54 +0000301 // IsNotTrivial tracks whether this is something that mem2reg could have
302 // promoted itself. If so, we don't want to transform it needlessly. Note
303 // that we can't just check based on the type: the alloca may be of an i32
304 // but that has pointer arithmetic to set byte 3 of it or something.
Chris Lattner6e733d32009-01-28 20:16:43 +0000305 bool IsNotTrivial = false;
Chris Lattner7809ecd2009-02-03 01:30:09 +0000306 const Type *VectorTy = 0;
Chris Lattner1a3257b2009-02-03 18:15:05 +0000307 bool HadAVector = false;
308 if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
Chris Lattner0ff83ab2009-03-04 19:22:30 +0000309 0, unsigned(AllocaSize)) && IsNotTrivial) {
Chris Lattner7809ecd2009-02-03 01:30:09 +0000310 AllocaInst *NewAI;
Chris Lattner1a3257b2009-02-03 18:15:05 +0000311 // If we were able to find a vector type that can handle this with
312 // insert/extract elements, and if there was at least one use that had
313 // a vector type, promote this to a vector. We don't want to promote
314 // random stuff that doesn't use vectors (e.g. <9 x double>) because then
315 // we just get a lot of insert/extracts. If at least one vector is
316 // involved, then we probably really do have a union of vector/array.
317 if (VectorTy && isa<VectorType>(VectorTy) && HadAVector) {
Nick Lewycky59136252009-09-15 07:08:25 +0000318 DEBUG(errs() << "CONVERT TO VECTOR: " << *AI << "\n TYPE = "
Chris Lattnerbdff5482009-08-23 04:37:46 +0000319 << *VectorTy << '\n');
Chris Lattner15c82772009-02-02 20:44:45 +0000320
Chris Lattner7809ecd2009-02-03 01:30:09 +0000321 // Create and insert the vector alloca.
Owen Anderson50dead02009-07-15 23:53:25 +0000322 NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
Chris Lattner15c82772009-02-02 20:44:45 +0000323 ConvertUsesToScalar(AI, NewAI, 0);
Chris Lattner7809ecd2009-02-03 01:30:09 +0000324 } else {
Chris Lattnerbdff5482009-08-23 04:37:46 +0000325 DEBUG(errs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
Chris Lattner7809ecd2009-02-03 01:30:09 +0000326
327 // Create and insert the integer alloca.
Owen Anderson1d0be152009-08-13 21:58:54 +0000328 const Type *NewTy = IntegerType::get(AI->getContext(), AllocaSize*8);
Owen Anderson50dead02009-07-15 23:53:25 +0000329 NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
Chris Lattner7809ecd2009-02-03 01:30:09 +0000330 ConvertUsesToScalar(AI, NewAI, 0);
Chris Lattner6e733d32009-01-28 20:16:43 +0000331 }
Chris Lattner7809ecd2009-02-03 01:30:09 +0000332 NewAI->takeName(AI);
333 AI->eraseFromParent();
334 ++NumConverted;
335 Changed = true;
336 continue;
337 }
Chris Lattner6e733d32009-01-28 20:16:43 +0000338
Chris Lattner7809ecd2009-02-03 01:30:09 +0000339 // Otherwise, couldn't process this alloca.
Chris Lattnered7b41e2003-05-27 15:45:27 +0000340 }
341
342 return Changed;
343}
Chris Lattner5e062a12003-05-30 04:15:41 +0000344
Chris Lattnera10b29b2007-04-25 05:02:56 +0000345/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
346/// predicate, do SROA now.
Victor Hernandez7b929da2009-10-23 21:09:37 +0000347void SROA::DoScalarReplacement(AllocaInst *AI,
348 std::vector<AllocaInst*> &WorkList) {
Chris Lattnerff114702009-09-15 05:14:57 +0000349 DEBUG(errs() << "Found inst to SROA: " << *AI << '\n');
Chris Lattnera10b29b2007-04-25 05:02:56 +0000350 SmallVector<AllocaInst*, 32> ElementAllocas;
351 if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
352 ElementAllocas.reserve(ST->getNumContainedTypes());
353 for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
Owen Anderson50dead02009-07-15 23:53:25 +0000354 AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
Chris Lattnera10b29b2007-04-25 05:02:56 +0000355 AI->getAlignment(),
Daniel Dunbarfe09b202009-07-30 17:37:43 +0000356 AI->getName() + "." + Twine(i), AI);
Chris Lattnera10b29b2007-04-25 05:02:56 +0000357 ElementAllocas.push_back(NA);
358 WorkList.push_back(NA); // Add to worklist for recursive processing
359 }
360 } else {
361 const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
362 ElementAllocas.reserve(AT->getNumElements());
363 const Type *ElTy = AT->getElementType();
364 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Owen Anderson50dead02009-07-15 23:53:25 +0000365 AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
Daniel Dunbarfe09b202009-07-30 17:37:43 +0000366 AI->getName() + "." + Twine(i), AI);
Chris Lattnera10b29b2007-04-25 05:02:56 +0000367 ElementAllocas.push_back(NA);
368 WorkList.push_back(NA); // Add to worklist for recursive processing
369 }
370 }
371
Bob Wilsonb742def2009-12-18 20:14:40 +0000372 // Now that we have created the new alloca instructions, rewrite all the
373 // uses of the old alloca.
374 RewriteForScalarRepl(AI, AI, 0, ElementAllocas);
Chris Lattnera59adc42009-12-14 05:11:02 +0000375
Bob Wilsonb742def2009-12-18 20:14:40 +0000376 // Now erase any instructions that were made dead while rewriting the alloca.
377 DeleteDeadInstructions();
Bob Wilson39c88a62009-12-17 18:34:24 +0000378 AI->eraseFromParent();
Bob Wilsonb742def2009-12-18 20:14:40 +0000379
Chris Lattnera10b29b2007-04-25 05:02:56 +0000380 NumReplaced++;
381}
Chris Lattnera59adc42009-12-14 05:11:02 +0000382
Bob Wilsonb742def2009-12-18 20:14:40 +0000383/// DeleteDeadInstructions - Erase instructions on the DeadInstrs list,
384/// recursively including all their operands that become trivially dead.
385void SROA::DeleteDeadInstructions() {
386 while (!DeadInsts.empty()) {
387 Instruction *I = cast<Instruction>(DeadInsts.pop_back_val());
Chris Lattnera59adc42009-12-14 05:11:02 +0000388
Bob Wilsonb742def2009-12-18 20:14:40 +0000389 for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
390 if (Instruction *U = dyn_cast<Instruction>(*OI)) {
391 // Zero out the operand and see if it becomes trivially dead.
392 // (But, don't add allocas to the dead instruction list -- they are
393 // already on the worklist and will be deleted separately.)
394 *OI = 0;
395 if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U))
396 DeadInsts.push_back(U);
Chris Lattnera59adc42009-12-14 05:11:02 +0000397 }
Bob Wilsonb742def2009-12-18 20:14:40 +0000398
399 I->eraseFromParent();
Chris Lattnera59adc42009-12-14 05:11:02 +0000400 }
Chris Lattnera59adc42009-12-14 05:11:02 +0000401}
Bob Wilsonb742def2009-12-18 20:14:40 +0000402
Chris Lattnerd878ecd2004-11-14 05:00:19 +0000403/// AllUsersAreLoads - Return true if all users of this value are loads.
404static bool AllUsersAreLoads(Value *Ptr) {
405 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
406 I != E; ++I)
407 if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
408 return false;
Misha Brukmanfd939082005-04-21 23:48:37 +0000409 return true;
Chris Lattnerd878ecd2004-11-14 05:00:19 +0000410}
411
Bob Wilsonb742def2009-12-18 20:14:40 +0000412/// isSafeForScalarRepl - Check if instruction I is a safe use with regard to
413/// performing scalar replacement of alloca AI. The results are flagged in
414/// the Info parameter. Offset and ArrayOffset indicate the position within
415/// AI that is referenced by this instruction.
416void SROA::isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
417 uint64_t ArrayOffset, AllocaInfo &Info) {
418 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
419 Instruction *User = cast<Instruction>(*UI);
Chris Lattnerbe883a22003-11-25 21:09:18 +0000420
Bob Wilsonb742def2009-12-18 20:14:40 +0000421 if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
422 isSafeForScalarRepl(BC, AI, Offset, ArrayOffset, Info);
423 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
424 uint64_t GEPArrayOffset = ArrayOffset;
425 uint64_t GEPOffset = Offset;
426 isSafeGEP(GEPI, AI, GEPOffset, GEPArrayOffset, Info);
427 if (!Info.isUnsafe)
428 isSafeForScalarRepl(GEPI, AI, GEPOffset, GEPArrayOffset, Info);
429 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
430 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
431 if (Length)
432 isSafeMemAccess(AI, Offset, ArrayOffset, Length->getZExtValue(), 0,
433 UI.getOperandNo() == 1, Info);
434 else
435 MarkUnsafe(Info);
436 } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
437 if (!LI->isVolatile()) {
438 const Type *LIType = LI->getType();
439 isSafeMemAccess(AI, Offset, ArrayOffset, TD->getTypeAllocSize(LIType),
440 LIType, false, Info);
441 } else
442 MarkUnsafe(Info);
443 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
444 // Store is ok if storing INTO the pointer, not storing the pointer
445 if (!SI->isVolatile() && SI->getOperand(0) != I) {
446 const Type *SIType = SI->getOperand(0)->getType();
447 isSafeMemAccess(AI, Offset, ArrayOffset, TD->getTypeAllocSize(SIType),
448 SIType, true, Info);
449 } else
450 MarkUnsafe(Info);
451 } else if (isa<DbgInfoIntrinsic>(UI)) {
452 // If one user is DbgInfoIntrinsic then check if all users are
453 // DbgInfoIntrinsics.
454 if (OnlyUsedByDbgInfoIntrinsics(I)) {
455 Info.needsCleanup = true;
456 return;
457 }
458 MarkUnsafe(Info);
459 } else {
460 DEBUG(errs() << " Transformation preventing inst: " << *User << '\n');
461 MarkUnsafe(Info);
462 }
463 if (Info.isUnsafe) return;
Bob Wilson39c88a62009-12-17 18:34:24 +0000464 }
Bob Wilsonb742def2009-12-18 20:14:40 +0000465}
Bob Wilson39c88a62009-12-17 18:34:24 +0000466
Bob Wilsonb742def2009-12-18 20:14:40 +0000467/// isSafeGEP - Check if a GEP instruction can be handled for scalar
468/// replacement. It is safe when all the indices are constant, in-bounds
469/// references, and when the resulting offset corresponds to an element within
470/// the alloca type. The results are flagged in the Info parameter. Upon
471/// return, Offset is adjusted as specified by the GEP indices. For the
472/// special case of a variable index to a 2-element array, ArrayOffset is set
473/// to the array element size.
474void SROA::isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI,
475 uint64_t &Offset, uint64_t &ArrayOffset,
476 AllocaInfo &Info) {
477 gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI);
478 if (GEPIt == E)
479 return;
Bob Wilson39c88a62009-12-17 18:34:24 +0000480
Bob Wilsonb742def2009-12-18 20:14:40 +0000481 // The first GEP index must be zero.
482 if (!isa<ConstantInt>(GEPIt.getOperand()) ||
483 !cast<ConstantInt>(GEPIt.getOperand())->isZero())
484 return MarkUnsafe(Info);
485 if (++GEPIt == E)
486 return;
487
Chris Lattner88e6dc82008-08-23 05:21:06 +0000488 // If the first index is a non-constant index into an array, see if we can
489 // handle it as a special case.
Bob Wilsonb742def2009-12-18 20:14:40 +0000490 const Type *ArrayEltTy = 0;
491 if (ArrayOffset == 0 && Offset == 0) {
492 if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPIt)) {
493 if (!isa<ConstantInt>(GEPIt.getOperand())) {
494 uint64_t NumElements = AT->getNumElements();
495
496 // If this is an array index and the index is not constant, we cannot
497 // promote... that is unless the array has exactly one or two elements
498 // in it, in which case we CAN promote it, but we have to canonicalize
499 // this out if this is the only problem.
500 if ((NumElements != 1 && NumElements != 2) || !AllUsersAreLoads(GEPI))
501 return MarkUnsafe(Info);
Devang Patel4afc90d2009-02-10 07:00:59 +0000502 Info.needsCleanup = true;
Bob Wilsonb742def2009-12-18 20:14:40 +0000503 ArrayOffset = TD->getTypeAllocSizeInBits(AT->getElementType());
504 ArrayEltTy = AT->getElementType();
505 ++GEPIt;
Chris Lattner39a1c042007-05-30 06:11:23 +0000506 }
Chris Lattnerd878ecd2004-11-14 05:00:19 +0000507 }
Chris Lattner5e062a12003-05-30 04:15:41 +0000508 }
Bob Wilsonb742def2009-12-18 20:14:40 +0000509
Chris Lattner88e6dc82008-08-23 05:21:06 +0000510 // Walk through the GEP type indices, checking the types that this indexes
511 // into.
Bob Wilsonb742def2009-12-18 20:14:40 +0000512 for (; GEPIt != E; ++GEPIt) {
Chris Lattner88e6dc82008-08-23 05:21:06 +0000513 // Ignore struct elements, no extra checking needed for these.
Bob Wilsonb742def2009-12-18 20:14:40 +0000514 if (isa<StructType>(*GEPIt))
Chris Lattner88e6dc82008-08-23 05:21:06 +0000515 continue;
Matthijs Kooijman5fac55f2008-10-06 16:23:31 +0000516
Bob Wilsonb742def2009-12-18 20:14:40 +0000517 ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
518 if (!IdxVal)
519 return MarkUnsafe(Info);
520
521 if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPIt)) {
Matthijs Kooijman5fac55f2008-10-06 16:23:31 +0000522 // This GEP indexes an array. Verify that this is an in-range constant
523 // integer. Specifically, consider A[0][i]. We cannot know that the user
524 // isn't doing invalid things like allowing i to index an out-of-range
525 // subscript that accesses A[1]. Because of this, we have to reject SROA
Bob Wilsond614a1f2009-12-04 21:51:35 +0000526 // of any accesses into structs where any of the components are variables.
Matthijs Kooijman5fac55f2008-10-06 16:23:31 +0000527 if (IdxVal->getZExtValue() >= AT->getNumElements())
528 return MarkUnsafe(Info);
Bob Wilsonb742def2009-12-18 20:14:40 +0000529 } else {
530 const VectorType *VT = dyn_cast<VectorType>(*GEPIt);
531 assert(VT && "unexpected type in GEP type iterator");
Dale Johannesenc0bc5472008-11-04 20:54:03 +0000532 if (IdxVal->getZExtValue() >= VT->getNumElements())
533 return MarkUnsafe(Info);
Matthijs Kooijman5fac55f2008-10-06 16:23:31 +0000534 }
Chris Lattner88e6dc82008-08-23 05:21:06 +0000535 }
Bob Wilsonb742def2009-12-18 20:14:40 +0000536
537 // All the indices are safe. Now compute the offset due to this GEP and
538 // check if the alloca has a component element at that offset.
539 if (ArrayOffset == 0) {
540 SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
541 Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(),
542 &Indices[0], Indices.size());
543 } else {
544 // Both array elements have the same type, so it suffices to check one of
545 // them. Copy the GEP indices starting from the array index, but replace
546 // that variable index with a constant zero.
547 SmallVector<Value*, 8> Indices(GEPI->op_begin() + 2, GEPI->op_end());
548 Indices[0] = Constant::getNullValue(Type::getInt32Ty(GEPI->getContext()));
549 const Type *ArrayEltPtr = PointerType::getUnqual(ArrayEltTy);
550 Offset += TD->getIndexedOffset(ArrayEltPtr, &Indices[0], Indices.size());
551 }
552 if (!TypeHasComponent(AI->getAllocatedType(), Offset, 0))
553 MarkUnsafe(Info);
Chris Lattner5e062a12003-05-30 04:15:41 +0000554}
555
Bob Wilsonb742def2009-12-18 20:14:40 +0000556/// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI
557/// alloca or has an offset and size that corresponds to a component element
558/// within it. The offset checked here may have been formed from a GEP with a
559/// pointer bitcasted to a different type.
560void SROA::isSafeMemAccess(AllocaInst *AI, uint64_t Offset,
561 uint64_t ArrayOffset, uint64_t MemSize,
562 const Type *MemOpType, bool isStore,
563 AllocaInfo &Info) {
564 // Check if this is a load/store of the entire alloca.
565 if (Offset == 0 && ArrayOffset == 0 &&
566 MemSize == TD->getTypeAllocSize(AI->getAllocatedType())) {
567 bool UsesAggregateType = (MemOpType == AI->getAllocatedType());
568 // This is safe for MemIntrinsics (where MemOpType is 0), integer types
569 // (which are essentially the same as the MemIntrinsics, especially with
570 // regard to copying padding between elements), or references using the
571 // aggregate type of the alloca.
572 if (!MemOpType || isa<IntegerType>(MemOpType) || UsesAggregateType) {
573 if (!UsesAggregateType) {
574 if (isStore)
575 Info.isMemCpyDst = true;
576 else
577 Info.isMemCpySrc = true;
578 }
579 return;
580 }
581 }
582 // Check if the offset/size correspond to a component within the alloca type.
583 const Type *T = AI->getAllocatedType();
584 if (TypeHasComponent(T, Offset, MemSize) &&
585 (ArrayOffset == 0 || TypeHasComponent(T, Offset + ArrayOffset, MemSize)))
586 return;
587
588 return MarkUnsafe(Info);
589}
590
591/// TypeHasComponent - Return true if T has a component type with the
592/// specified offset and size. If Size is zero, do not check the size.
593bool SROA::TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size) {
594 const Type *EltTy;
595 uint64_t EltSize;
596 if (const StructType *ST = dyn_cast<StructType>(T)) {
597 const StructLayout *Layout = TD->getStructLayout(ST);
598 unsigned EltIdx = Layout->getElementContainingOffset(Offset);
599 EltTy = ST->getContainedType(EltIdx);
600 EltSize = TD->getTypeAllocSize(EltTy);
601 Offset -= Layout->getElementOffset(EltIdx);
602 } else if (const ArrayType *AT = dyn_cast<ArrayType>(T)) {
603 EltTy = AT->getElementType();
604 EltSize = TD->getTypeAllocSize(EltTy);
605 Offset %= EltSize;
606 } else {
607 return false;
608 }
609 if (Offset == 0 && (Size == 0 || EltSize == Size))
610 return true;
611 // Check if the component spans multiple elements.
612 if (Offset + Size > EltSize)
613 return false;
614 return TypeHasComponent(EltTy, Offset, Size);
615}
616
/// RewriteForScalarRepl - Alloca AI is being split into NewElts, so rewrite
/// the instruction I, which references it, to use the separate elements.
/// Offset indicates the position within AI that is referenced by this
/// instruction.
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                                SmallVector<AllocaInst*, 32> &NewElts) {
  // Dispatch over every user of I.  Replaced instructions are queued on
  // DeadInsts instead of being erased here, which keeps the use iteration
  // valid.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
      // A bitcast does not change the offset; recurse through it.
      RewriteBitCast(BC, AI, Offset, NewElts);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      // A GEP adjusts the offset and may need to be re-targeted at one of
      // the new element allocas.
      RewriteGEP(GEPI, AI, Offset, NewElts);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      // NOTE(review): Length is dereferenced without a null check, so this
      // presumably relies on the earlier safety analysis rejecting mem
      // intrinsics with non-constant lengths — confirm, since a variable
      // length would make Length null here.
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      uint64_t MemSize = Length->getZExtValue();
      // Only intrinsics that cover the entire alloca are rewritten here.
      if (Offset == 0 &&
          MemSize == TD->getTypeAllocSize(AI->getAllocatedType()))
        RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      const Type *LIType = LI->getType();
      if (LIType == AI->getAllocatedType()) {
        // Replace:
        //   %res = load { i32, i32 }* %alloc
        // with:
        //   %load.0 = load i32* %alloc.0
        //   %insert.0 insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0
        //   %load.1 = load i32* %alloc.1
        //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
        // (Also works for arrays instead of structs)
        Value *Insert = UndefValue::get(LIType);
        for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
          Value *Load = new LoadInst(NewElts[i], "load", LI);
          Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
        }
        LI->replaceAllUsesWith(Insert);
        DeadInsts.push_back(LI);
      } else if (isa<IntegerType>(LIType) &&
                 TD->getTypeAllocSize(LIType) ==
                 TD->getTypeAllocSize(AI->getAllocatedType())) {
        // If this is a load of the entire alloca to an integer, rewrite it.
        RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      const Type *SIType = Val->getType();
      if (SIType == AI->getAllocatedType()) {
        // Replace:
        //   store { i32, i32 } %val, { i32, i32 }* %alloc
        // with:
        //   %val.0 = extractvalue { i32, i32 } %val, 0
        //   store i32 %val.0, i32* %alloc.0
        //   %val.1 = extractvalue { i32, i32 } %val, 1
        //   store i32 %val.1, i32* %alloc.1
        // (Also works for arrays instead of structs)
        for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
          Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
          new StoreInst(Extract, NewElts[i], SI);
        }
        DeadInsts.push_back(SI);
      } else if (isa<IntegerType>(SIType) &&
                 TD->getTypeAllocSize(SIType) ==
                 TD->getTypeAllocSize(AI->getAllocatedType())) {
        // If this is a store of the entire alloca from an integer, rewrite it.
        RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      }
    }
  }
}
686
Bob Wilsonb742def2009-12-18 20:14:40 +0000687/// RewriteBitCast - Update a bitcast reference to the alloca being replaced
688/// and recursively continue updating all of its uses.
689void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
690 SmallVector<AllocaInst*, 32> &NewElts) {
691 RewriteForScalarRepl(BC, AI, Offset, NewElts);
692 if (BC->getOperand(0) != AI)
693 return;
Bob Wilson39c88a62009-12-17 18:34:24 +0000694
Bob Wilsonb742def2009-12-18 20:14:40 +0000695 // The bitcast references the original alloca. Replace its uses with
696 // references to the first new element alloca.
697 Instruction *Val = NewElts[0];
698 if (Val->getType() != BC->getDestTy()) {
699 Val = new BitCastInst(Val, BC->getDestTy(), "", BC);
700 Val->takeName(BC);
Daniel Dunbarfca55c82009-12-16 10:56:17 +0000701 }
Bob Wilsonb742def2009-12-18 20:14:40 +0000702 BC->replaceAllUsesWith(Val);
703 DeadInsts.push_back(BC);
Daniel Dunbarfca55c82009-12-16 10:56:17 +0000704}
705
Bob Wilsonb742def2009-12-18 20:14:40 +0000706/// FindElementAndOffset - Return the index of the element containing Offset
707/// within the specified type, which must be either a struct or an array.
708/// Sets T to the type of the element and Offset to the offset within that
709/// element.
710unsigned SROA::FindElementAndOffset(const Type *&T, uint64_t &Offset) {
711 unsigned Idx = 0;
712 if (const StructType *ST = dyn_cast<StructType>(T)) {
713 const StructLayout *Layout = TD->getStructLayout(ST);
714 Idx = Layout->getElementContainingOffset(Offset);
715 T = ST->getContainedType(Idx);
716 Offset -= Layout->getElementOffset(Idx);
717 } else {
718 const ArrayType *AT = dyn_cast<ArrayType>(T);
719 assert(AT && "unexpected type for scalar replacement");
720 T = AT->getElementType();
721 uint64_t EltSize = TD->getTypeAllocSize(T);
722 Idx = (unsigned)(Offset / EltSize);
723 Offset -= Idx * EltSize;
Chris Lattnera59adc42009-12-14 05:11:02 +0000724 }
Bob Wilsonb742def2009-12-18 20:14:40 +0000725 return Idx;
726}
727
/// RewriteGEP - Check if this GEP instruction moves the pointer across
/// elements of the alloca that are being split apart, and if so, rewrite
/// the GEP to be relative to the new element.
void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
                      SmallVector<AllocaInst*, 32> &NewElts) {
  // Offset on entry is the position of the GEP's base pointer within AI;
  // add the GEP's own indexed offset to get the absolute position of the
  // GEP result within AI.
  uint64_t OldOffset = Offset;
  SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
  Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(),
                                 &Indices[0], Indices.size());

  // Rewrite the GEP's users first, relative to the new absolute offset.
  RewriteForScalarRepl(GEPI, AI, Offset, NewElts);

  // Determine which top-level element the GEP's base pointer referred to.
  // Note: FindElementAndOffset mutates both arguments in place.
  const Type *T = AI->getAllocatedType();
  unsigned OldIdx = FindElementAndOffset(T, OldOffset);
  if (GEPI->getOperand(0) == AI)
    OldIdx = ~0U; // Force the GEP to be rewritten.

  // Determine which top-level element the GEP result refers to.
  T = AI->getAllocatedType();
  uint64_t EltOffset = Offset;
  unsigned Idx = FindElementAndOffset(T, EltOffset);

  // If this GEP does not move the pointer across elements of the alloca
  // being split, then it does not need to be rewritten.
  if (Idx == OldIdx)
    return;

  // Build a replacement GEP into NewElts[Idx]: each loop iteration descends
  // one level of the element type until the remaining offset is consumed.
  const Type *i32Ty = Type::getInt32Ty(AI->getContext());
  SmallVector<Value*, 8> NewArgs;
  NewArgs.push_back(Constant::getNullValue(i32Ty));
  while (EltOffset != 0) {
    unsigned EltIdx = FindElementAndOffset(T, EltOffset);
    NewArgs.push_back(ConstantInt::get(i32Ty, EltIdx));
  }
  // If only the leading zero index remains, the element alloca itself is the
  // replacement; otherwise emit the new (inbounds) GEP.
  Instruction *Val = NewElts[Idx];
  if (NewArgs.size() > 1) {
    Val = GetElementPtrInst::CreateInBounds(Val, NewArgs.begin(),
                                            NewArgs.end(), "", GEPI);
    Val->takeName(GEPI);
  }
  // Preserve the original GEP's pointer type for its users.
  if (Val->getType() != GEPI->getType())
    Val = new BitCastInst(Val, GEPI->getType(), Val->getNameStr(), GEPI);
  GEPI->replaceAllUsesWith(Val);
  DeadInsts.push_back(GEPI);
}
772
773/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
774/// Rewrite it to copy or set the elements of the scalarized memory.
Bob Wilsonb742def2009-12-18 20:14:40 +0000775void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
Victor Hernandez7b929da2009-10-23 21:09:37 +0000776 AllocaInst *AI,
Chris Lattnerd93afec2009-01-07 07:18:45 +0000777 SmallVector<AllocaInst*, 32> &NewElts) {
Chris Lattnerd93afec2009-01-07 07:18:45 +0000778 // If this is a memcpy/memmove, construct the other pointer as the
Chris Lattner88fe1ad2009-03-04 19:23:25 +0000779 // appropriate type. The "Other" pointer is the pointer that goes to memory
780 // that doesn't have anything to do with the alloca that we are promoting. For
781 // memset, this Value* stays null.
Chris Lattnerd93afec2009-01-07 07:18:45 +0000782 Value *OtherPtr = 0;
Owen Andersone922c022009-07-22 00:24:57 +0000783 LLVMContext &Context = MI->getContext();
Chris Lattnerdfe964c2009-03-08 03:59:00 +0000784 unsigned MemAlignment = MI->getAlignment();
Chris Lattner3ce5e882009-03-08 03:37:16 +0000785 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy
Bob Wilsonb742def2009-12-18 20:14:40 +0000786 if (Inst == MTI->getRawDest())
Chris Lattner3ce5e882009-03-08 03:37:16 +0000787 OtherPtr = MTI->getRawSource();
Chris Lattnerd93afec2009-01-07 07:18:45 +0000788 else {
Bob Wilsonb742def2009-12-18 20:14:40 +0000789 assert(Inst == MTI->getRawSource());
Chris Lattner3ce5e882009-03-08 03:37:16 +0000790 OtherPtr = MTI->getRawDest();
Chris Lattnerd93afec2009-01-07 07:18:45 +0000791 }
792 }
Bob Wilson78c50b82009-12-08 18:22:03 +0000793
Chris Lattnerd93afec2009-01-07 07:18:45 +0000794 // If there is an other pointer, we want to convert it to the same pointer
795 // type as AI has, so we can GEP through it safely.
796 if (OtherPtr) {
Bob Wilsonb742def2009-12-18 20:14:40 +0000797
798 // Remove bitcasts and all-zero GEPs from OtherPtr. This is an
799 // optimization, but it's also required to detect the corner case where
800 // both pointer operands are referencing the same memory, and where
801 // OtherPtr may be a bitcast or GEP that currently being rewritten. (This
802 // function is only called for mem intrinsics that access the whole
803 // aggregate, so non-zero GEPs are not an issue here.)
804 while (1) {
805 if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr)) {
806 OtherPtr = BC->getOperand(0);
807 continue;
808 }
809 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr)) {
810 // All zero GEPs are effectively bitcasts.
811 if (GEP->hasAllZeroIndices()) {
812 OtherPtr = GEP->getOperand(0);
813 continue;
814 }
815 }
816 break;
817 }
818 // If OtherPtr has already been rewritten, this intrinsic will be dead.
819 if (OtherPtr == NewElts[0])
820 return;
Chris Lattner372dda82007-03-05 07:52:57 +0000821
Chris Lattnerd93afec2009-01-07 07:18:45 +0000822 if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
823 if (BCE->getOpcode() == Instruction::BitCast)
824 OtherPtr = BCE->getOperand(0);
825
826 // If the pointer is not the right type, insert a bitcast to the right
827 // type.
828 if (OtherPtr->getType() != AI->getType())
829 OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
830 MI);
831 }
832
833 // Process each element of the aggregate.
834 Value *TheFn = MI->getOperand(0);
835 const Type *BytePtrTy = MI->getRawDest()->getType();
Bob Wilsonb742def2009-12-18 20:14:40 +0000836 bool SROADest = MI->getRawDest() == Inst;
Chris Lattnerd93afec2009-01-07 07:18:45 +0000837
Owen Anderson1d0be152009-08-13 21:58:54 +0000838 Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));
Chris Lattnerd93afec2009-01-07 07:18:45 +0000839
840 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
841 // If this is a memcpy/memmove, emit a GEP of the other element address.
842 Value *OtherElt = 0;
Chris Lattner1541e0f2009-03-04 19:20:50 +0000843 unsigned OtherEltAlign = MemAlignment;
844
Bob Wilsonb742def2009-12-18 20:14:40 +0000845 if (OtherPtr == AI) {
846 OtherElt = NewElts[i];
847 OtherEltAlign = 0;
848 } else if (OtherPtr) {
Owen Anderson1d0be152009-08-13 21:58:54 +0000849 Value *Idx[2] = { Zero,
850 ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
Bob Wilsonb742def2009-12-18 20:14:40 +0000851 OtherElt = GetElementPtrInst::CreateInBounds(OtherPtr, Idx, Idx + 2,
Daniel Dunbarfe09b202009-07-30 17:37:43 +0000852 OtherPtr->getNameStr()+"."+Twine(i),
Bob Wilsonb742def2009-12-18 20:14:40 +0000853 MI);
Chris Lattner1541e0f2009-03-04 19:20:50 +0000854 uint64_t EltOffset;
855 const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
856 if (const StructType *ST =
857 dyn_cast<StructType>(OtherPtrTy->getElementType())) {
858 EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
859 } else {
860 const Type *EltTy =
861 cast<SequentialType>(OtherPtr->getType())->getElementType();
Duncan Sands777d2302009-05-09 07:06:46 +0000862 EltOffset = TD->getTypeAllocSize(EltTy)*i;
Chris Lattner1541e0f2009-03-04 19:20:50 +0000863 }
864
865 // The alignment of the other pointer is the guaranteed alignment of the
866 // element, which is affected by both the known alignment of the whole
867 // mem intrinsic and the alignment of the element. If the alignment of
868 // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the
869 // known alignment is just 4 bytes.
870 OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
Chris Lattnerc14d3ca2007-03-08 06:36:54 +0000871 }
Chris Lattnerd93afec2009-01-07 07:18:45 +0000872
873 Value *EltPtr = NewElts[i];
Chris Lattner1541e0f2009-03-04 19:20:50 +0000874 const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();
Chris Lattnerd93afec2009-01-07 07:18:45 +0000875
876 // If we got down to a scalar, insert a load or store as appropriate.
877 if (EltTy->isSingleValueType()) {
Chris Lattner3ce5e882009-03-08 03:37:16 +0000878 if (isa<MemTransferInst>(MI)) {
Chris Lattner1541e0f2009-03-04 19:20:50 +0000879 if (SROADest) {
880 // From Other to Alloca.
881 Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
882 new StoreInst(Elt, EltPtr, MI);
883 } else {
884 // From Alloca to Other.
885 Value *Elt = new LoadInst(EltPtr, "tmp", MI);
886 new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
887 }
Chris Lattnerd93afec2009-01-07 07:18:45 +0000888 continue;
889 }
890 assert(isa<MemSetInst>(MI));
891
892 // If the stored element is zero (common case), just store a null
893 // constant.
894 Constant *StoreVal;
895 if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
896 if (CI->isZero()) {
Owen Andersona7235ea2009-07-31 20:28:14 +0000897 StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0>
Chris Lattnerd93afec2009-01-07 07:18:45 +0000898 } else {
899 // If EltTy is a vector type, get the element type.
Dan Gohman44118f02009-06-16 00:20:26 +0000900 const Type *ValTy = EltTy->getScalarType();
901
Chris Lattnerd93afec2009-01-07 07:18:45 +0000902 // Construct an integer with the right value.
903 unsigned EltSize = TD->getTypeSizeInBits(ValTy);
904 APInt OneVal(EltSize, CI->getZExtValue());
905 APInt TotalVal(OneVal);
906 // Set each byte.
907 for (unsigned i = 0; 8*i < EltSize; ++i) {
908 TotalVal = TotalVal.shl(8);
909 TotalVal |= OneVal;
910 }
911
912 // Convert the integer value to the appropriate type.
Owen Andersoneed707b2009-07-24 23:12:02 +0000913 StoreVal = ConstantInt::get(Context, TotalVal);
Chris Lattnerd93afec2009-01-07 07:18:45 +0000914 if (isa<PointerType>(ValTy))
Owen Andersonbaf3c402009-07-29 18:55:55 +0000915 StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
Chris Lattnerd93afec2009-01-07 07:18:45 +0000916 else if (ValTy->isFloatingPoint())
Owen Andersonbaf3c402009-07-29 18:55:55 +0000917 StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
Chris Lattnerd93afec2009-01-07 07:18:45 +0000918 assert(StoreVal->getType() == ValTy && "Type mismatch!");
919
920 // If the requested value was a vector constant, create it.
921 if (EltTy != ValTy) {
922 unsigned NumElts = cast<VectorType>(ValTy)->getNumElements();
923 SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
Owen Andersonaf7ec972009-07-28 21:19:26 +0000924 StoreVal = ConstantVector::get(&Elts[0], NumElts);
Chris Lattnerd93afec2009-01-07 07:18:45 +0000925 }
926 }
927 new StoreInst(StoreVal, EltPtr, MI);
928 continue;
929 }
930 // Otherwise, if we're storing a byte variable, use a memset call for
931 // this element.
932 }
933
934 // Cast the element pointer to BytePtrTy.
935 if (EltPtr->getType() != BytePtrTy)
936 EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);
937
938 // Cast the other pointer (if we have one) to BytePtrTy.
939 if (OtherElt && OtherElt->getType() != BytePtrTy)
940 OtherElt = new BitCastInst(OtherElt, BytePtrTy,OtherElt->getNameStr(),
941 MI);
942
Duncan Sands777d2302009-05-09 07:06:46 +0000943 unsigned EltSize = TD->getTypeAllocSize(EltTy);
Chris Lattnerd93afec2009-01-07 07:18:45 +0000944
945 // Finally, insert the meminst for this element.
Chris Lattner3ce5e882009-03-08 03:37:16 +0000946 if (isa<MemTransferInst>(MI)) {
Chris Lattnerd93afec2009-01-07 07:18:45 +0000947 Value *Ops[] = {
948 SROADest ? EltPtr : OtherElt, // Dest ptr
949 SROADest ? OtherElt : EltPtr, // Src ptr
Owen Andersoneed707b2009-07-24 23:12:02 +0000950 ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
Owen Anderson1d0be152009-08-13 21:58:54 +0000951 // Align
952 ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign)
Chris Lattnerd93afec2009-01-07 07:18:45 +0000953 };
954 CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
955 } else {
956 assert(isa<MemSetInst>(MI));
957 Value *Ops[] = {
958 EltPtr, MI->getOperand(2), // Dest, Value,
Owen Andersoneed707b2009-07-24 23:12:02 +0000959 ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
Chris Lattnerd93afec2009-01-07 07:18:45 +0000960 Zero // Align
961 };
962 CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
963 }
Chris Lattner372dda82007-03-05 07:52:57 +0000964 }
Bob Wilsonb742def2009-12-18 20:14:40 +0000965 DeadInsts.push_back(MI);
Chris Lattner372dda82007-03-05 07:52:57 +0000966}
Chris Lattnerd2fa7812009-01-07 08:11:13 +0000967
/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getAllocatedType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // Handle tail padding by extending the operand to the full alloca width so
  // every field's bit range exists in SrcVal.
  if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    SrcVal = new ZExtInst(SrcVal,
                          IntegerType::get(SI->getContext(), AllocaSizeBits),
                          "", SI);

  DEBUG(errs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI
               << '\n');

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      // On big-endian targets the first field occupies the most significant
      // bits, so count the shift from the top instead.
      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero sized fields like {}, they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                             IntegerType::get(SI->getContext(), FieldSizeBits),
                               "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;

    // Big-endian targets walk from the most significant element downward;
    // little-endian targets walk up from bit 0.
    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero sized fields like {}, they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               IntegerType::get(SI->getContext(),
                                                ElementSizeBits),"",SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      // Advance to the next element's bit position.
      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  // The original store is now fully replaced; queue it for deletion.
  DeadInsts.push_back(SI);
}
1085
/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getAllocatedType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  DEBUG(errs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
               << '\n');

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
  }

  // Accumulate the pieces by OR-ing them into an integer of the full alloca
  // width, starting from zero.
  Value *ResultVal =
    Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero sized fields like {}, they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
                                                     FieldSizeBits);
    if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
        !isa<VectorType>(FieldTy))
      SrcField = new BitCastInst(SrcField,
                                 PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout)  // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i*ArrayEltBitOffset;

    // On big-endian targets the first field occupies the most significant
    // bits, so count the shift from the top instead.
    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    // Merge this piece into the accumulated result.
    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  // Handle tail padding by truncating the result
  if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
    ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);

  LI->replaceAllUsesWith(ResultVal);
  // The original load is now fully replaced; queue it for deletion.
  DeadInsts.push_back(LI);
}
1167
Duncan Sands3cb36502007-11-04 14:43:57 +00001168/// HasPadding - Return true if the specified type has any structure or
1169/// alignment padding, false otherwise.
Duncan Sandsa0fcc082008-06-04 08:21:45 +00001170static bool HasPadding(const Type *Ty, const TargetData &TD) {
Chris Lattner39a1c042007-05-30 06:11:23 +00001171 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
1172 const StructLayout *SL = TD.getStructLayout(STy);
1173 unsigned PrevFieldBitOffset = 0;
1174 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Duncan Sands3cb36502007-11-04 14:43:57 +00001175 unsigned FieldBitOffset = SL->getElementOffsetInBits(i);
1176
Chris Lattner39a1c042007-05-30 06:11:23 +00001177 // Padding in sub-elements?
Duncan Sandsa0fcc082008-06-04 08:21:45 +00001178 if (HasPadding(STy->getElementType(i), TD))
Chris Lattner39a1c042007-05-30 06:11:23 +00001179 return true;
Duncan Sands3cb36502007-11-04 14:43:57 +00001180
Chris Lattner39a1c042007-05-30 06:11:23 +00001181 // Check to see if there is any padding between this element and the
1182 // previous one.
1183 if (i) {
Duncan Sands3cb36502007-11-04 14:43:57 +00001184 unsigned PrevFieldEnd =
Chris Lattner39a1c042007-05-30 06:11:23 +00001185 PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
1186 if (PrevFieldEnd < FieldBitOffset)
1187 return true;
1188 }
Duncan Sands3cb36502007-11-04 14:43:57 +00001189
Chris Lattner39a1c042007-05-30 06:11:23 +00001190 PrevFieldBitOffset = FieldBitOffset;
1191 }
Duncan Sands3cb36502007-11-04 14:43:57 +00001192
Chris Lattner39a1c042007-05-30 06:11:23 +00001193 // Check for tail padding.
1194 if (unsigned EltCount = STy->getNumElements()) {
1195 unsigned PrevFieldEnd = PrevFieldBitOffset +
1196 TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
Duncan Sands3cb36502007-11-04 14:43:57 +00001197 if (PrevFieldEnd < SL->getSizeInBits())
Chris Lattner39a1c042007-05-30 06:11:23 +00001198 return true;
1199 }
1200
1201 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Duncan Sandsa0fcc082008-06-04 08:21:45 +00001202 return HasPadding(ATy->getElementType(), TD);
Duncan Sands3cb36502007-11-04 14:43:57 +00001203 } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
Duncan Sandsa0fcc082008-06-04 08:21:45 +00001204 return HasPadding(VTy->getElementType(), TD);
Chris Lattner39a1c042007-05-30 06:11:23 +00001205 }
Duncan Sands777d2302009-05-09 07:06:46 +00001206 return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
Chris Lattner39a1c042007-05-30 06:11:23 +00001207}
Chris Lattner372dda82007-03-05 07:52:57 +00001208
Chris Lattnerf5990ed2004-11-14 04:24:28 +00001209/// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of
1210/// an aggregate can be broken down into elements. Return 0 if not, 3 if safe,
1211/// or 1 if safe after canonicalization has been performed.
Victor Hernandez7b929da2009-10-23 21:09:37 +00001212int SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
Chris Lattner5e062a12003-05-30 04:15:41 +00001213 // Loop over the use list of the alloca. We can only transform it if all of
1214 // the users are safe to transform.
Chris Lattner39a1c042007-05-30 06:11:23 +00001215 AllocaInfo Info;
1216
Bob Wilsonb742def2009-12-18 20:14:40 +00001217 isSafeForScalarRepl(AI, AI, 0, 0, Info);
1218 if (Info.isUnsafe) {
1219 DEBUG(errs() << "Cannot transform: " << *AI << '\n');
1220 return 0;
Chris Lattnerf5990ed2004-11-14 04:24:28 +00001221 }
Chris Lattner39a1c042007-05-30 06:11:23 +00001222
1223 // Okay, we know all the users are promotable. If the aggregate is a memcpy
1224 // source and destination, we have to be careful. In particular, the memcpy
1225 // could be moving around elements that live in structure padding of the LLVM
1226 // types, but may actually be used. In these cases, we refuse to promote the
1227 // struct.
1228 if (Info.isMemCpySrc && Info.isMemCpyDst &&
Bob Wilsonb742def2009-12-18 20:14:40 +00001229 HasPadding(AI->getAllocatedType(), *TD))
Chris Lattner39a1c042007-05-30 06:11:23 +00001230 return 0;
Duncan Sands3cb36502007-11-04 14:43:57 +00001231
Chris Lattner39a1c042007-05-30 06:11:23 +00001232 // If we require cleanup, return 1, otherwise return 3.
Devang Patel4afc90d2009-02-10 07:00:59 +00001233 return Info.needsCleanup ? 1 : 3;
Chris Lattnerf5990ed2004-11-14 04:24:28 +00001234}
1235
/// CleanupGEP - GEP is used by an Alloca, which can be promoted after the GEP
/// is canonicalized here.  The interesting case is a GEP whose second index
/// (the one that selects into an array element of the alloca) is a variable:
/// for 1- and 2-element arrays this rewrites the variable index into
/// constant-indexed forms so SROA can split the alloca.
void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
  // Step to the second GEP index; the first index just steps over the
  // pointer operand itself.
  gep_type_iterator I = gep_type_begin(GEPI);
  ++I;

  // Only array-typed indexing is handled here.
  const ArrayType *AT = dyn_cast<ArrayType>(*I);
  if (!AT)
    return;

  uint64_t NumElements = AT->getNumElements();

  // A constant index is already canonical; nothing to do.
  if (isa<ConstantInt>(I.getOperand()))
    return;

  if (NumElements == 1) {
    // A variable index into a 1-element array can only legally be 0, so
    // replace it with the constant 0.
    GEPI->setOperand(2,
                Constant::getNullValue(Type::getInt32Ty(GEPI->getContext())));
    return;
  }

  // Only the 1- and 2-element cases reach this function with a variable
  // index; the assert below enforces that (presumably guaranteed by the
  // earlier safety analysis — TODO confirm against isSafeForScalarRepl).
  assert(NumElements == 2 && "Unhandled case!");
  // All users of the GEP must be loads.  At each use of the GEP, insert
  // two loads of the appropriate indexed GEP and select between them.
  // IsOne is true when the variable index selects element 1.
  Value *IsOne = new ICmpInst(GEPI, ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone");
  // Insert the new GEP instructions, which are properly indexed.
  // Reuse the original index list, overwriting only the variable slot.
  SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
  Indices[1] = Constant::getNullValue(Type::getInt32Ty(GEPI->getContext()));
  Value *ZeroIdx = GetElementPtrInst::CreateInBounds(GEPI->getOperand(0),
                                                     Indices.begin(),
                                                     Indices.end(),
                                                     GEPI->getName()+".0",GEPI);
  Indices[1] = ConstantInt::get(Type::getInt32Ty(GEPI->getContext()), 1);
  Value *OneIdx = GetElementPtrInst::CreateInBounds(GEPI->getOperand(0),
                                                    Indices.begin(),
                                                    Indices.end(),
                                                    GEPI->getName()+".1", GEPI);
  // Replace all loads of the variable index GEP with loads from both
  // indexes and a select.  The cast<LoadInst> asserts the loads-only
  // invariant stated above.
  while (!GEPI->use_empty()) {
    LoadInst *LI = cast<LoadInst>(GEPI->use_back());
    Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
    Value *One = new LoadInst(OneIdx , LI->getName()+".1", LI);
    Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
    LI->replaceAllUsesWith(R);
    LI->eraseFromParent();
  }
}
1286
1287/// CleanupAllocaUsers - If SROA reported that it can promote the specified
Chris Lattnerf5990ed2004-11-14 04:24:28 +00001288/// allocation, but only if cleaned up, perform the cleanups required.
Bob Wilsonb742def2009-12-18 20:14:40 +00001289void SROA::CleanupAllocaUsers(Value *V) {
Chris Lattnerd878ecd2004-11-14 05:00:19 +00001290 // At this point, we know that the end result will be SROA'd and promoted, so
1291 // we can insert ugly code if required so long as sroa+mem2reg will clean it
1292 // up.
Bob Wilsonb742def2009-12-18 20:14:40 +00001293 for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
Chris Lattnerd878ecd2004-11-14 05:00:19 +00001294 UI != E; ) {
Devang Patel4afc90d2009-02-10 07:00:59 +00001295 User *U = *UI++;
Bob Wilsonb742def2009-12-18 20:14:40 +00001296 if (isa<BitCastInst>(U)) {
1297 CleanupAllocaUsers(U);
1298 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
Devang Patel4afc90d2009-02-10 07:00:59 +00001299 CleanupGEP(GEPI);
Bob Wilsonb742def2009-12-18 20:14:40 +00001300 CleanupAllocaUsers(GEPI);
1301 if (GEPI->use_empty()) GEPI->eraseFromParent();
1302 } else {
Jay Foad0906b1b2009-06-06 17:49:35 +00001303 Instruction *I = cast<Instruction>(U);
Devang Patel4afc90d2009-02-10 07:00:59 +00001304 SmallVector<DbgInfoIntrinsic *, 2> DbgInUses;
Zhou Shengb0c41992009-03-18 12:48:48 +00001305 if (!isa<StoreInst>(I) && OnlyUsedByDbgInfoIntrinsics(I, &DbgInUses)) {
Devang Patel4afc90d2009-02-10 07:00:59 +00001306 // Safe to remove debug info uses.
1307 while (!DbgInUses.empty()) {
1308 DbgInfoIntrinsic *DI = DbgInUses.back(); DbgInUses.pop_back();
1309 DI->eraseFromParent();
Chris Lattnerd878ecd2004-11-14 05:00:19 +00001310 }
Devang Patel4afc90d2009-02-10 07:00:59 +00001311 I->eraseFromParent();
Chris Lattnerd878ecd2004-11-14 05:00:19 +00001312 }
1313 }
1314 }
Chris Lattner5e062a12003-05-30 04:15:41 +00001315}
Chris Lattnera1888942005-12-12 07:19:13 +00001316
/// MergeInType - Add the 'In' type to the accumulated type (Accum) so far at
/// the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///  1) A union of vector types of the same size and potentially its elements.
///     Here we turn element accesses into insert/extract element operations.
///     This promotes a <4 x float> with a store of float to the third element
///     into a <4 x float> that uses insert element.
///  2) A fully general blob of memory, which we turn into some (potentially
///     large) integer type with extract and insert operations where the loads
///     and stores would mutate the memory.
///
/// VecTy is a tri-state accumulator:
///   - null:        no access type has been merged yet;
///   - vector type: all accesses so far are compatible with that vector;
///   - VoidTy:      vector promotion is impossible; fall back to a large
///                  integer.
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
                        unsigned AllocaSize, const TargetData &TD,
                        LLVMContext &Context) {
  // If this could be contributing to a vector, analyze it.
  if (VecTy != Type::getVoidTy(Context)) { // either null or a vector type.

    // If the In type is a vector that is the same size as the alloca, see if it
    // matches the existing VecTy.
    if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
      if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
        // If we're storing/loading a vector of the right size, allow it as a
        // vector.  If this the first vector we see, remember the type so that
        // we know the element size.
        if (VecTy == 0)
          VecTy = VInTy;
        return;
      }
    } else if (In->isFloatTy() || In->isDoubleTy() ||
               (isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 &&
                isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
      // If we're accessing something that could be an element of a vector, see
      // if the implied vector agrees with what we already have and if Offset is
      // compatible with it.  The element size must evenly divide both the
      // access offset and the total alloca size, and must match any element
      // size already implied by VecTy.
      unsigned EltSize = In->getPrimitiveSizeInBits()/8;
      if (Offset % EltSize == 0 &&
          AllocaSize % EltSize == 0 &&
          (VecTy == 0 ||
           cast<VectorType>(VecTy)->getElementType()
                 ->getPrimitiveSizeInBits()/8 == EltSize)) {
        // First scalar access seen: imply a vector of these elements that
        // spans the whole alloca.
        if (VecTy == 0)
          VecTy = VectorType::get(In, AllocaSize/EltSize);
        return;
      }
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  VecTy = Type::getVoidTy(Context);
}
1368
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001369/// CanConvertToScalar - V is a pointer. If we can convert the pointee and all
Bob Wilsonefc58e72009-12-09 18:05:27 +00001370/// its accesses to a single vector type, return true and set VecTy to
Chris Lattner7809ecd2009-02-03 01:30:09 +00001371/// the new type. If we could convert the alloca into a single promotable
1372/// integer, return true but set VecTy to VoidTy. Further, if the use is not a
1373/// completely trivial use that mem2reg could promote, set IsNotTrivial. Offset
1374/// is the current offset from the base of the alloca being analyzed.
Chris Lattnera1888942005-12-12 07:19:13 +00001375///
Chris Lattner1a3257b2009-02-03 18:15:05 +00001376/// If we see at least one access to the value that is as a vector type, set the
1377/// SawVec flag.
Chris Lattner1a3257b2009-02-03 18:15:05 +00001378bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
1379 bool &SawVec, uint64_t Offset,
Chris Lattner7809ecd2009-02-03 01:30:09 +00001380 unsigned AllocaSize) {
Chris Lattnera1888942005-12-12 07:19:13 +00001381 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
1382 Instruction *User = cast<Instruction>(*UI);
1383
1384 if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001385 // Don't break volatile loads.
Chris Lattner6e733d32009-01-28 20:16:43 +00001386 if (LI->isVolatile())
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001387 return false;
Owen Andersone922c022009-07-22 00:24:57 +00001388 MergeInType(LI->getType(), Offset, VecTy,
1389 AllocaSize, *TD, V->getContext());
Chris Lattner1a3257b2009-02-03 18:15:05 +00001390 SawVec |= isa<VectorType>(LI->getType());
Chris Lattnercf321862009-01-07 06:39:58 +00001391 continue;
1392 }
1393
1394 if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
Reid Spencer24d6da52007-01-21 00:29:26 +00001395 // Storing the pointer, not into the value?
Chris Lattner6e733d32009-01-28 20:16:43 +00001396 if (SI->getOperand(0) == V || SI->isVolatile()) return 0;
Owen Andersonfa5cbd62009-07-03 19:42:02 +00001397 MergeInType(SI->getOperand(0)->getType(), Offset,
Owen Andersone922c022009-07-22 00:24:57 +00001398 VecTy, AllocaSize, *TD, V->getContext());
Chris Lattner1a3257b2009-02-03 18:15:05 +00001399 SawVec |= isa<VectorType>(SI->getOperand(0)->getType());
Chris Lattnercf321862009-01-07 06:39:58 +00001400 continue;
1401 }
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001402
1403 if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
Chris Lattner1a3257b2009-02-03 18:15:05 +00001404 if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset,
1405 AllocaSize))
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001406 return false;
Chris Lattnera1888942005-12-12 07:19:13 +00001407 IsNotTrivial = true;
Chris Lattnercf321862009-01-07 06:39:58 +00001408 continue;
1409 }
1410
1411 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001412 // If this is a GEP with a variable indices, we can't handle it.
1413 if (!GEP->hasAllConstantIndices())
1414 return false;
Chris Lattnercf321862009-01-07 06:39:58 +00001415
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001416 // Compute the offset that this GEP adds to the pointer.
1417 SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
Bob Wilsonb742def2009-12-18 20:14:40 +00001418 uint64_t GEPOffset = TD->getIndexedOffset(GEP->getPointerOperandType(),
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001419 &Indices[0], Indices.size());
1420 // See if all uses can be converted.
Chris Lattner1a3257b2009-02-03 18:15:05 +00001421 if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,Offset+GEPOffset,
Chris Lattner7809ecd2009-02-03 01:30:09 +00001422 AllocaSize))
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001423 return false;
1424 IsNotTrivial = true;
1425 continue;
Chris Lattnera1888942005-12-12 07:19:13 +00001426 }
Chris Lattner3ce5e882009-03-08 03:37:16 +00001427
Chris Lattner3d730f72009-02-03 02:01:43 +00001428 // If this is a constant sized memset of a constant value (e.g. 0) we can
1429 // handle it.
Chris Lattner3ce5e882009-03-08 03:37:16 +00001430 if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
1431 // Store of constant value and constant size.
1432 if (isa<ConstantInt>(MSI->getValue()) &&
1433 isa<ConstantInt>(MSI->getLength())) {
Chris Lattner3ce5e882009-03-08 03:37:16 +00001434 IsNotTrivial = true;
1435 continue;
1436 }
Chris Lattner3d730f72009-02-03 02:01:43 +00001437 }
Chris Lattnerc5704872009-03-08 04:04:21 +00001438
1439 // If this is a memcpy or memmove into or out of the whole allocation, we
1440 // can handle it like a load or store of the scalar type.
1441 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
1442 if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()))
1443 if (Len->getZExtValue() == AllocaSize && Offset == 0) {
1444 IsNotTrivial = true;
1445 continue;
1446 }
1447 }
Chris Lattnerdfe964c2009-03-08 03:59:00 +00001448
Devang Patel00e389c2009-03-06 07:03:54 +00001449 // Ignore dbg intrinsic.
1450 if (isa<DbgInfoIntrinsic>(User))
1451 continue;
1452
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001453 // Otherwise, we cannot handle this!
1454 return false;
Chris Lattnera1888942005-12-12 07:19:13 +00001455 }
1456
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001457 return true;
Chris Lattnera1888942005-12-12 07:19:13 +00001458}
1459
/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
///
/// Every user form accepted here must have been approved beforehand by
/// CanConvertToScalar; anything else hits the llvm_unreachable at the bottom.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
  // Each iteration consumes (rewrites and erases) one user of Ptr.
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      // A bitcast does not change the offset; rewrite its users recursively.
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      // getIndexedOffset yields bytes; the recursion works in bits (*8).
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getPointerOperandType(),
                                                &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    // The remaining cases emit replacement IR right before the user.
    IRBuilder<> Builder(User->getParent(), User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // A store becomes read-modify-write: load the whole scalar, insert the
      // stored bits at Offset, and store the whole scalar back.
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      // FIXME: Remove once builder has Twine API.
      Value *Old = Builder.CreateLoad(NewAI,
                                      (NewAI->getName()+".in").str().c_str());
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      if (NumBytes != 0) {
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero (zero is already correct at any width).
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;

        // Read-modify-write the splatted constant into place at Offset.
        // FIXME: Remove once builder has Twine API.
        Value *Old = Builder.CreateLoad(NewAI,
                                        (NewAI->getName()+".in").str().c_str());
        Value *New = ConvertScalar_InsertValue(
                                    ConstantInt::get(User->getContext(), APVal),
                                               Old, Offset, Builder);
        Builder.CreateStore(New, NewAI);
      }
      MSI->eraseFromParent();
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");

      // If the source and destination are both to the same alloca, then this is
      // a noop copy-to-self, just delete it.  Otherwise, emit a load and store
      // as appropriate.
      AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject());

      if (MTI->getSource()->getUnderlyingObject() != OrigAI) {
        // Dest must be OrigAI, change this to be a load from the original
        // pointer (bitcasted), then a store to our new alloca.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (MTI->getDest()->getUnderlyingObject() != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // Noop transfer. Src == Dst
      }


      MTI->eraseFromParent();
      continue;
    }

    // If user is a dbg info intrinsic then it is safe to remove it.
    if (isa<DbgInfoIntrinsic>(User)) {
      User->eraseFromParent();
      continue;
    }

    llvm_unreachable("Unsupported operation!");
  }
}
Chris Lattner79b3bd32007-04-25 06:40:51 +00001586
Chris Lattner6e011152009-02-03 21:01:03 +00001587/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
1588/// or vector value FromVal, extracting the bits from the offset specified by
1589/// Offset. This returns the value, which is of type ToType.
1590///
1591/// This happens when we are converting an "integer union" to a single
Duncan Sands4b3dfbd2009-02-02 10:06:20 +00001592/// integer scalar, or when we are converting a "vector union" to a vector with
1593/// insert/extractelement instructions.
Chris Lattner800de312008-02-29 07:03:13 +00001594///
Duncan Sands4b3dfbd2009-02-02 10:06:20 +00001595/// Offset is an offset from the original alloca, in bits that need to be
Chris Lattner6e011152009-02-03 21:01:03 +00001596/// shifted to the right.
1597Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
1598 uint64_t Offset, IRBuilder<> &Builder) {
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001599 // If the load is of the whole new alloca, no conversion is needed.
Chris Lattner6e011152009-02-03 21:01:03 +00001600 if (FromVal->getType() == ToType && Offset == 0)
1601 return FromVal;
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001602
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001603 // If the result alloca is a vector type, this is either an element
1604 // access or a bitcast to another vector type of the same size.
Chris Lattner6e011152009-02-03 21:01:03 +00001605 if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
1606 if (isa<VectorType>(ToType))
1607 return Builder.CreateBitCast(FromVal, ToType, "tmp");
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001608
1609 // Otherwise it must be an element access.
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001610 unsigned Elt = 0;
1611 if (Offset) {
Duncan Sands777d2302009-05-09 07:06:46 +00001612 unsigned EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001613 Elt = Offset/EltSize;
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001614 assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
Chris Lattner800de312008-02-29 07:03:13 +00001615 }
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001616 // Return the element extracted out of it.
Owen Anderson1d0be152009-08-13 21:58:54 +00001617 Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get(
1618 Type::getInt32Ty(FromVal->getContext()), Elt), "tmp");
Chris Lattner6e011152009-02-03 21:01:03 +00001619 if (V->getType() != ToType)
1620 V = Builder.CreateBitCast(V, ToType, "tmp");
Chris Lattner7809ecd2009-02-03 01:30:09 +00001621 return V;
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001622 }
Chris Lattner1aa70562009-02-03 21:08:45 +00001623
1624 // If ToType is a first class aggregate, extract out each of the pieces and
1625 // use insertvalue's to form the FCA.
1626 if (const StructType *ST = dyn_cast<StructType>(ToType)) {
1627 const StructLayout &Layout = *TD->getStructLayout(ST);
Owen Anderson9e9a0d52009-07-30 23:03:37 +00001628 Value *Res = UndefValue::get(ST);
Chris Lattner1aa70562009-02-03 21:08:45 +00001629 for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
1630 Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
Chris Lattnere991ced2009-02-06 04:34:07 +00001631 Offset+Layout.getElementOffsetInBits(i),
Chris Lattner1aa70562009-02-03 21:08:45 +00001632 Builder);
1633 Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
1634 }
1635 return Res;
1636 }
1637
1638 if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
Duncan Sands777d2302009-05-09 07:06:46 +00001639 uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
Owen Anderson9e9a0d52009-07-30 23:03:37 +00001640 Value *Res = UndefValue::get(AT);
Chris Lattner1aa70562009-02-03 21:08:45 +00001641 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
1642 Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
1643 Offset+i*EltSize, Builder);
1644 Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
1645 }
1646 return Res;
1647 }
Duncan Sands4b3dfbd2009-02-02 10:06:20 +00001648
Chris Lattner2e0d5f82009-01-31 02:28:54 +00001649 // Otherwise, this must be a union that was converted to an integer value.
Chris Lattner6e011152009-02-03 21:01:03 +00001650 const IntegerType *NTy = cast<IntegerType>(FromVal->getType());
Duncan Sands4b3dfbd2009-02-02 10:06:20 +00001651
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001652 // If this is a big-endian system and the load is narrower than the
1653 // full alloca type, we need to do a shift to get the right bits.
1654 int ShAmt = 0;
Chris Lattner56c38522009-01-07 06:34:28 +00001655 if (TD->isBigEndian()) {
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001656 // On big-endian machines, the lowest bit is stored at the bit offset
1657 // from the pointer given by getTypeStoreSizeInBits. This matters for
1658 // integers with a bitwidth that is not a multiple of 8.
Chris Lattner56c38522009-01-07 06:34:28 +00001659 ShAmt = TD->getTypeStoreSizeInBits(NTy) -
Chris Lattner6e011152009-02-03 21:01:03 +00001660 TD->getTypeStoreSizeInBits(ToType) - Offset;
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001661 } else {
1662 ShAmt = Offset;
1663 }
Duncan Sands4b3dfbd2009-02-02 10:06:20 +00001664
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001665 // Note: we support negative bitwidths (with shl) which are not defined.
1666 // We do this to support (f.e.) loads off the end of a structure where
1667 // only some bits are used.
1668 if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
Owen Andersonfa5cbd62009-07-03 19:42:02 +00001669 FromVal = Builder.CreateLShr(FromVal,
Owen Andersoneed707b2009-07-24 23:12:02 +00001670 ConstantInt::get(FromVal->getType(),
Chris Lattner1aa70562009-02-03 21:08:45 +00001671 ShAmt), "tmp");
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001672 else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
Owen Andersonfa5cbd62009-07-03 19:42:02 +00001673 FromVal = Builder.CreateShl(FromVal,
Owen Andersoneed707b2009-07-24 23:12:02 +00001674 ConstantInt::get(FromVal->getType(),
Chris Lattner1aa70562009-02-03 21:08:45 +00001675 -ShAmt), "tmp");
Duncan Sands4b3dfbd2009-02-02 10:06:20 +00001676
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001677 // Finally, unconditionally truncate the integer to the right width.
Chris Lattner6e011152009-02-03 21:01:03 +00001678 unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001679 if (LIBitWidth < NTy->getBitWidth())
Owen Andersonfa5cbd62009-07-03 19:42:02 +00001680 FromVal =
Owen Anderson1d0be152009-08-13 21:58:54 +00001681 Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
1682 LIBitWidth), "tmp");
Chris Lattner55a683d2009-02-03 07:08:57 +00001683 else if (LIBitWidth > NTy->getBitWidth())
Owen Andersonfa5cbd62009-07-03 19:42:02 +00001684 FromVal =
Owen Anderson1d0be152009-08-13 21:58:54 +00001685 Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
1686 LIBitWidth), "tmp");
Duncan Sands4b3dfbd2009-02-02 10:06:20 +00001687
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001688 // If the result is an integer, this is a trunc or bitcast.
Chris Lattner6e011152009-02-03 21:01:03 +00001689 if (isa<IntegerType>(ToType)) {
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001690 // Should be done.
Chris Lattner6e011152009-02-03 21:01:03 +00001691 } else if (ToType->isFloatingPoint() || isa<VectorType>(ToType)) {
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001692 // Just do a bitcast, we know the sizes match up.
Chris Lattner6e011152009-02-03 21:01:03 +00001693 FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
Chris Lattner800de312008-02-29 07:03:13 +00001694 } else {
Chris Lattner9d34c4d2008-02-29 07:12:06 +00001695 // Otherwise must be a pointer.
Chris Lattner6e011152009-02-03 21:01:03 +00001696 FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
Chris Lattner800de312008-02-29 07:03:13 +00001697 }
Chris Lattner6e011152009-02-03 21:01:03 +00001698 assert(FromVal->getType() == ToType && "Didn't convert right?");
1699 return FromVal;
Chris Lattner800de312008-02-29 07:03:13 +00001700}
1701
/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
///
/// Returns the new value for the alloca's scalar (with SV merged in); the
/// caller is expected to store it back.
Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
                                       uint64_t Offset, IRBuilder<> &Builder) {

  // Convert the stored type to the actual type, shift it left to insert
  // then 'or' into place.
  const Type *AllocaType = Old->getType();
  LLVMContext &Context = Old->getContext();

  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
    uint64_t ValSize = TD->getTypeAllocSizeInBits(SV->getType());

    // Changing the whole vector with memset or with an access of a different
    // vector type?  A full-width store just reinterprets the bits.
    if (ValSize == VecSize)
      return Builder.CreateBitCast(SV, AllocaType, "tmp");

    uint64_t EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());

    // Must be an element insertion.  Offset is in bits, so dividing by the
    // element size (also in bits) yields the element index.
    unsigned Elt = Offset/EltSize;

    // Normalize the element type first (e.g. storing a float into an
    // <N x i32> slot) so insertelement types line up.
    if (SV->getType() != VTy->getElementType())
      SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");

    SV = Builder.CreateInsertElement(Old, SV,
                     ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
                                     "tmp");
    return SV;
  }

  // If SV is a first-class aggregate value, insert each value recursively,
  // using the struct layout to compute each field's bit offset.
  if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      Builder);
    }
    return Old;
  }

  // Likewise for arrays: elements are at uniform EltSize strides.
  if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
    }
    return Old;
  }

  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
    SV = Builder.CreateBitCast(SV,
                            IntegerType::get(SV->getContext(),SrcWidth), "tmp");
  else if (isa<PointerType>(SV->getType()))
    SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(SV->getContext()), "tmp");

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
             AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType, "tmp");
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
      // After truncation the source exactly fills the alloca, so update the
      // widths used for the shift/mask computation below.
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }

  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative bitwidths (with shr) which are not defined.
  // We do this to support (f.e.) stores off the end of a structure where
  // only some bits in the structure are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(),
                           ShAmt), "tmp");
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    // Negative shift: shift right instead; keep Mask in sync so it still
    // covers exactly the bits SV occupies in the destination.
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(),
                            -ShAmt), "tmp");
    Mask = Mask.lshr(-ShAmt);
  }

  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.  If the widths match, SV replaces Old entirely and no
  // merge is needed.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}
1824
1825
Chris Lattner79b3bd32007-04-25 06:40:51 +00001826
1827/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
1828/// some part of a constant global variable. This intentionally only accepts
1829/// constant expressions because we don't can't rewrite arbitrary instructions.
1830static bool PointsToConstantGlobal(Value *V) {
1831 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
1832 return GV->isConstant();
1833 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
1834 if (CE->getOpcode() == Instruction::BitCast ||
1835 CE->getOpcode() == Instruction::GetElementPtr)
1836 return PointsToConstantGlobal(CE->getOperand(0));
1837 return false;
1838}
1839
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
///
/// TheCopy is an out-parameter: on success it holds the single qualifying
/// memcpy/memmove instruction found (or stays null if none was seen).
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
      // Ignore non-volatile loads, they are always ok.  A volatile load
      // deliberately falls through and is rejected below.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.  A bitcast doesn't move
      // the pointer, so isOffset is propagated unchanged.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemTransferInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    // (Operand 1 of the intrinsic is the destination pointer.)
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}
1896
1897/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
1898/// modified by a copy from a constant global. If we can prove this, we can
1899/// replace any uses of the alloca with uses of the global directly.
Victor Hernandez7b929da2009-10-23 21:09:37 +00001900Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI) {
Chris Lattner79b3bd32007-04-25 06:40:51 +00001901 Instruction *TheCopy = 0;
1902 if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
1903 return TheCopy;
1904 return 0;
1905}