//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well-known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs.  As such, iterating
// between SRoA and Mem2Reg until we run out of things to promote works well.
//
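// As a hypothetical illustration (not drawn from the test suite), given:
//
//   %X = alloca { int, float }
//   %P = getelementptr { int, float }* %X, int 0, uint 0
//   store int 4, int* %P
//
// this pass first splits %X into one alloca per member:
//
//   %X.0 = alloca int
//   %X.1 = alloca float
//   store int 4, int* %X.0
//
// and Mem2Reg can then promote %X.0 and %X.1 into SSA registers.
//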
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Pass.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include <iostream>
using namespace llvm;

namespace {
  Statistic<> NumReplaced("scalarrepl", "Number of allocas broken up");
  Statistic<> NumPromoted("scalarrepl", "Number of allocas promoted");
  Statistic<> NumConverted("scalarrepl",
                           "Number of aggregates converted to scalar");

  struct SROA : public FunctionPass {
    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass requires dominator information and target
    // data, and we know it will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    int isSafeElementUse(Value *Ptr);
    int isSafeUseOfAllocation(Instruction *User);
    int isSafeAllocaToScalarRepl(AllocationInst *AI);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
  };

  RegisterOpt<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); }


bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  const TargetData &TD = getAnalysis<TargetData>();
  DominatorTree     &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI, TD))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF, TD);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

// performScalarRepl - This is a simple worklist-driven algorithm.  It runs on
// all of the malloc/alloca instructions in the function, removing them if they
// are only used by getelementptr instructions.
//
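// For example (hypothetical IR), an "%A = alloca [2 x int]" whose only uses
// are GEPs of the form "getelementptr [2 x int]* %A, int 0, uint C" is
// replaced by element allocas "%A.0" and "%A.1" of type int, and each GEP is
// rewritten to point at the corresponding element alloca.
//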
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register, do so now.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // We cannot transform the allocation instruction if it is an array
    // allocation (allocations OF arrays are ok though), and an allocation of a
    // scalar value cannot be decomposed at all.
    //
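    // For instance (a hypothetical example), "%A = alloca int, uint 4" is an
    // array allocation and is rejected below, while "%A = alloca [4 x int]"
    // is an allocation OF an array and can be decomposed.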
    if (AI->isArrayAllocation() ||
        (!isa<StructType>(AI->getAllocatedType()) &&
         !isa<ArrayType>(AI->getAllocatedType()))) continue;

    // Check that all of the users of the allocation are capable of being
    // transformed.
    switch (isSafeAllocaToScalarRepl(AI)) {
    default: assert(0 && "Unexpected value!");
    case 0:  // Not safe to scalar replace.
      continue;
    case 1:  // Safe, but requires cleanup/canonicalizations first
      CanonicalizeAllocaUsers(AI);
      // FALL THROUGH to the safe case once the users are canonicalized.
    case 3:  // Safe to scalar replace.
      break;
    }

    DEBUG(std::cerr << "Found inst to xform: " << *AI);
    Changed = true;

    std::vector<AllocaInst*> ElementAllocas;
    if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
      ElementAllocas.reserve(ST->getNumContainedTypes());
      for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                        AI->getAlignment(),
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    } else {
      const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
      ElementAllocas.reserve(AT->getNumElements());
      const Type *ElTy = AT->getElementType();
      for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    }

    // Now that we have created the alloca instructions that we want to use,
    // expand the getelementptr instructions to use them.
    //
    while (!AI->use_empty()) {
      Instruction *User = cast<Instruction>(AI->use_back());
      GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
      // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
      unsigned Idx =
        (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getRawValue();

      assert(Idx < ElementAllocas.size() && "Index out of range?");
      AllocaInst *AllocaToUse = ElementAllocas[Idx];

      Value *RepValue;
      if (GEPI->getNumOperands() == 3) {
        // Do not insert a new getelementptr instruction with zero indices, only
        // to have it optimized out later.
        RepValue = AllocaToUse;
      } else {
        // We are indexing deeply into the structure, so we still need a
        // getelementptr instruction to finish the indexing.  This may be
        // expanded itself once the worklist is rerun.
        //
        std::string OldName = GEPI->getName();  // Steal the old name.
        std::vector<Value*> NewArgs;
        NewArgs.push_back(Constant::getNullValue(Type::IntTy));
        NewArgs.insert(NewArgs.end(), GEPI->op_begin()+3, GEPI->op_end());
        GEPI->setName("");
        RepValue = new GetElementPtrInst(AllocaToUse, NewArgs, OldName, GEPI);
      }

      // Move all of the users over to the new GEP.
      GEPI->replaceAllUsesWith(RepValue);
      // Delete the old GEP
      GEPI->eraseFromParent();
    }

    // Finally, delete the Alloca instruction
    AI->getParent()->getInstList().erase(AI);
    NumReplaced++;
  }

  return Changed;
}


/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.
///
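/// For example (hypothetical IR), a "load int* %elt" or a "store int 1,
/// int* %elt" into the element is allowed, but storing the element pointer
/// itself, e.g. "store int* %elt, int** %p", or GEP'ing off of it with a
/// non-zero first index, is rejected.
///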
int SROA::isSafeElementUse(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return 0;
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      if (GEP->getNumOperands() > 1) {
        if (!isa<Constant>(GEP->getOperand(1)) ||
            !cast<Constant>(GEP->getOperand(1))->isNullValue())
          return 0;  // Using pointer arithmetic to navigate the array...
      }
      if (!isSafeElementUse(GEP)) return 0;
      break;
    }
    default:
      DEBUG(std::cerr << "  Transformation preventing inst: " << *User);
      return 0;
    }
  }
  return 3;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
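/// For example, a getelementptr of the form "GEP <ptr>, 0, <cst>" (in
/// hypothetical IR, "getelementptr {int, int}* %A, int 0, uint 1") is a
/// candidate for scalar replacement; any user that is not a getelementptr
/// is rejected outright.
///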
int SROA::isSafeUseOfAllocation(Instruction *User) {
  if (!isa<GetElementPtrInst>(User)) return 0;

  GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType()))
    return 0;

  ++I;
  if (I == E) return 0;  // ran out of GEP indices??

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
      // Check to make sure that index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      //
      if (cast<ConstantInt>(GEPI->getOperand(2))->getRawValue() >= NumElements)
        return 0;

      // We cannot scalar repl this level of the array unless all of the array
      // sub-indices are in-range constants.  In particular, consider:
      // A[0][i].  We cannot know that the user isn't doing invalid things like
      // allowing i to index an out-of-range subscript that accesses A[1].
      //
      // Scalar replacing *just* the outer index of the array is probably not
      // going to be a win anyway, so just give up.
      for (++I; I != E && isa<ArrayType>(*I); ++I) {
        const ArrayType *SubArrayTy = cast<ArrayType>(*I);
        uint64_t NumElements = SubArrayTy->getNumElements();
        if (!isa<ConstantInt>(I.getOperand())) return 0;
        if (cast<ConstantInt>(I.getOperand())->getRawValue() >= NumElements)
          return 0;
      }

    } else {
      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI))
        return 1;  // Canonicalization required!
      return 0;
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to reject
  // them.
  return isSafeElementUse(GEPI);
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  //
  int isSafe = 3;
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I));
    if (isSafe == 0) {
      DEBUG(std::cerr << "Cannot transform: " << *AI << "  due to user: "
                      << **I);
      return 0;
    }
  }
  // If we require cleanup, isSafe is now 1, otherwise it is 3.
  return isSafe;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
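///
/// For a two-element array indexed by a variable (a hypothetical example),
/// each "load (getelementptr %A, int 0, int %i)" is rewritten into loads of
/// both elements plus a select, roughly:
///
///   %isone = setne int %i, 0
///   %v.0   = load int* %A.gep.0
///   %v.1   = load int* %A.gep.1
///   %v     = select bool %isone, int %v.1, int %v.0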
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(*UI++);
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::IntTy));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP, insert
          // two loads of the appropriate indexed GEP and select between them.
          Value *IsOne = BinaryOperator::createSetNE(I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          std::vector<Value*> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::IntTy);
          Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0), Indices,
                                                 GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::IntTy, 1);
          Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0), Indices,
                                                GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are two cases we handle here:
///   1) An effectively integer union, where the pieces are stored into as
///      smaller integers (common with byte swap and other idioms).
///   2) A union of a vector and its elements.  Here we turn element accesses
///      into insert/extract element operations.
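///
/// For instance (hypothetical cases): merging ubyte into an accumulated uint
/// keeps uint, since the larger integer type wins; merging float into an
/// accumulated <4 x float> keeps the vector type.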
static bool MergeInType(const Type *In, const Type *&Accum) {
  // If this is our first type, just use it.
  const PackedType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In->isIntegral() && Accum->isIntegral()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (In->getTypeID() > Accum->getTypeID())
      Accum = In;
  } else if ((PTy = dyn_cast<PackedType>(Accum)) &&
             PTy->getElementType() == In) {
    // Accum is a vector, and we are accessing an element: ok.
  } else if ((PTy = dyn_cast<PackedType>(In)) &&
             PTy->getElementType() == Accum) {
    // In is a vector, and Accum is an element of it: ok, remember In.
    Accum = In;
  } else {
    return true;
  }
  return false;
}

/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least
/// as big as the specified number of bits.  If there is no suitable type,
/// this returns null.
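///
/// For example, getUIntAtLeastAsBitAs(17) returns Type::UIntTy, and
/// getUIntAtLeastAsBitAs(65) returns null.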
const Type *getUIntAtLeastAsBitAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::ULongTy;
  if (NumBits > 16) return Type::UIntTy;
  if (NumBits > 8) return Type::UShortTy;
  return Type::UByteTy;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate
/// from failure.
///
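/// As a hypothetical example, an "alloca uint" whose halves are also accessed
/// through a casted ushort* pointer converts to the single scalar type uint;
/// the loads and stores of the halves later become shifts, masks, and casts
/// in ConvertUsesToScalar.
///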
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer itself, not storing into the value?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType))
        return 0;
    } else if (CastInst *CI = dyn_cast<CastInst>(User)) {
      if (!isa<PointerType>(CI->getType())) return 0;
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getRawValue();
        unsigned ElSize = TD.getTypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBitAs(SubElt->getPrimitiveSizeInBits()+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<Constant>(GEP->getOperand(1))->isNullValue()) {
        // We are stepping into an element, e.g. a structure or an array:
        // GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getRawValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const PackedType *PackedTy = dyn_cast<PackedType>(AggTy)) {
          // Getting an element of the packed vector.
          if (Idx >= PackedTy->getNumElements()) return 0;  // Out of range.

          // Merge in the packed type.
          if (MergeInType(PackedTy, UsedType)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;  // Everything looks ok

        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8);
        if (NTy == 0 || MergeInType(NTy, UsedType)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType))
          return 0;
        continue;  // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}

/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be trivially
/// promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DEBUG(std::cerr << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
                  << *ActualTy << "\n");
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->front() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  if (ActualTy->isInteger())
    ActualTy = ActualTy->getUnsignedVersion();

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}


/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
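///
/// For instance (hypothetical IR, the integer-union case with Offset == 16):
/// a "load ushort* %P" is rewritten as a load of the whole uint alloca, a
/// right shift by 16 bits, and a cast of the result to ushort.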
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  bool isVectorInsert = isa<PackedType>(NewAI->getType()->getElementType());
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *NV = new LoadInst(NewAI, LI->getName(), LI);
      if (NV->getType() != LI->getType()) {
        if (const PackedType *PTy = dyn_cast<PackedType>(NV->getType())) {
          // Must be an element access.
          unsigned Elt = Offset/PTy->getElementType()->getPrimitiveSizeInBits();
          NV = new ExtractElementInst(NV, ConstantUInt::get(Type::UIntTy, Elt),
                                      "tmp", LI);
        } else {
          assert(NV->getType()->isInteger() && "Unknown promotion!");
          if (Offset && Offset < NV->getType()->getPrimitiveSizeInBits())
            NV = new ShiftInst(Instruction::Shr, NV,
                               ConstantUInt::get(Type::UByteTy, Offset),
                               LI->getName(), LI);
          NV = new CastInst(NV, LI->getType(), LI->getName(), LI);
        }
      }
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      // Convert the stored type to the actual alloca type, shift the value
      // left into position, and 'or' it into place.
      Value *SV = SI->getOperand(0);
      const Type *AllocaType = NewAI->getType()->getElementType();
      if (SV->getType() != AllocaType) {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        if (const PackedType *PTy = dyn_cast<PackedType>(AllocaType)) {
          // Must be an element insertion.
          unsigned Elt = Offset/PTy->getElementType()->getPrimitiveSizeInBits();
          SV = new InsertElementInst(Old, SV,
                                     ConstantUInt::get(Type::UIntTy, Elt),
                                     "tmp", SI);
        } else {
          // If SV is signed, convert it to unsigned, so that the next cast zero
          // extends the value.
          if (SV->getType()->isSigned())
            SV = new CastInst(SV, SV->getType()->getUnsignedVersion(),
                              SV->getName(), SI);
          SV = new CastInst(SV, Old->getType(), SV->getName(), SI);
          if (Offset && Offset < SV->getType()->getPrimitiveSizeInBits())
            SV = new ShiftInst(Instruction::Shl, SV,
                               ConstantUInt::get(Type::UByteTy, Offset),
                               SV->getName()+".adj", SI);
          // Mask out the bits we are about to insert from the old value.
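          // For example (hypothetical values): inserting 8 bits at Offset 8
          // into a 32-bit value computes the mask 0xFFFF00FF.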
          unsigned TotalBits = SV->getType()->getPrimitiveSizeInBits();
          unsigned InsertBits =
            SI->getOperand(0)->getType()->getPrimitiveSizeInBits();
          if (TotalBits != InsertBits) {
            assert(TotalBits > InsertBits);
            uint64_t Mask = ~(((1ULL << InsertBits)-1) << Offset);
            if (TotalBits != 64)
              Mask = Mask & ((1ULL << TotalBits)-1);
            Old = BinaryOperator::createAnd(Old,
                                ConstantUInt::get(Old->getType(), Mask),
                                Old->getName()+".mask", SI);
            SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
          }
        }
      }
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (CastInst *CI = dyn_cast<CastInst>(User)) {
      unsigned NewOff = Offset;
      const TargetData &TD = getAnalysis<TargetData>();
      if (TD.isBigEndian() && !isVectorInsert) {
        // Adjust the pointer.  For example, storing 16-bits into a 32-bit
        // alloca with just a cast makes it modify the top 16-bits.
        const Type *SrcTy = cast<PointerType>(Ptr->getType())->getElementType();
        const Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
        int PtrDiffBits = TD.getTypeSize(SrcTy)*8-TD.getTypeSize(DstTy)*8;
        NewOff += PtrDiffBits;
      }
      ConvertUsesToScalar(CI, NewAI, NewOff);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8;

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getRawValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        if (TD.isLittleEndian() || isVectorInsert)
          NewOffset += BitOffset;
        else
          NewOffset -= BitOffset;

      } else if (GEP->getNumOperands() == 3) {
        // We know that the first index (operand #1) is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getRawValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8;

          if (TD.isLittleEndian() || isVectorInsert)
            NewOffset += ElSizeBits*Idx;
          else
            NewOffset += AggSizeInBits-ElSizeBits*(Idx+1);
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset = TD.getStructLayout(STy)->MemberOffsets[Idx]*8;

          if (TD.isLittleEndian() || isVectorInsert)
            NewOffset += EltBitOffset;
          else {
            const PointerType *ElPtrTy = cast<PointerType>(GEP->getType());
            unsigned ElSizeBits = TD.getTypeSize(ElPtrTy->getElementType())*8;
            NewOffset += AggSizeInBits-(EltBitOffset+ElSizeBits);
          }

        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}