//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumDeadStore, "Number of dead stores eliminated");

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
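  // For example (illustrative IR, assuming a target with 64-bit pointers),
  // this turns:
  //   %a = alloca i32, i8 %n
  // into:
  //   %n.ext = zext i8 %n to i64
  //   %a = alloca i32, i64 %n.ext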
  if (TD) {
    const Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
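  // For example (illustrative IR for this rewrite):
  //   %p = alloca i32, i32 4
  // becomes:
  //   %arr = alloca [4 x i32]
  //   %p = getelementptr inbounds [4 x i32]* %arr, i32 0, i32 0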
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      const Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block
      // of allocas if possible...also skip interleaved debug info.
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It points to the first non-allocation instruction in the
      // block, insert our getelementptr instruction...
      //
      Value *NullIdx =
        Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
      Value *Idx[2];
      Idx[0] = NullIdx;
      Idx[1] = NullIdx;
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
                                          New->getName()+".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
    // If alloca'ing a zero byte object, replace the alloca with a null pointer.
    // Note that we only do this for alloca's, because malloc should allocate
    // and return a unique pointer, even for a zero byte allocation.
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
  }

  return 0;
}

/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
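/// For example (illustrative IR; both element types are 32 bits wide, so the
/// size check below is satisfied):
///   %q = bitcast <2 x i16>* %p to i32*
///   %v = load i32* %q
/// becomes:
///   %v.wide = load <2 x i16>* %p
///   %v = bitcast <2 x i16> %v.wide to i32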
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  const PointerType *DestTy = cast<PointerType>(CI->getType());
  const Type *DestPTy = DestTy->getElementType();
  if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    const Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
            Idxs[1] = Idxs[0];
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getTargetData() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is
          // then cast to a pointer; this pessimizes pointer analysis a lot.
          (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
            IC.getTargetData()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()),
                                 TD);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      TD->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile loads.
  if (LI.isVolatile()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
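  // For example (illustrative IR), the load below is replaced by %x:
  //   store i32 %x, i32* %p
  //   %t = add i32 %a, %b
  //   %y = load i32* %p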
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(),
                                                     BBI, 6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) &&
        GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
        return Res;

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}

/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
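/// For example (illustrative IR, assuming 32-bit pointers so the sizes match):
///   %q = bitcast i8** %p to i32*
///   store i32 %v, i32* %q
/// becomes:
///   %v.c = inttoptr i32 %v to i8*
///   store i8* %v.c, i8** %p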
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  const Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed.  Check to
  // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
        IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  const Type* CastSrcTy = SIOp0->getType();
  const Type* CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (SIOp0->getType()->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // SIOp0 is a pointer to aggregate and this is a store to the first field,
  // emit a GEP to index into its first field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
                                           NewGEPIndices.end());

  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value.  This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (!SI.isVolatile()) {
    if (Ptr->hasOneUse()) {
      if (isa<AllocaInst>(Ptr))
        return EraseInstFromFunction(SI);
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
        if (isa<AllocaInst>(GEP->getOperand(0))) {
          if (GEP->getOperand(0)->hasOneUse())
            return EraseInstFromFunction(SI);
        }
      }
    }
  }

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
                                 TD);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      TD->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
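  // For example (illustrative IR), the first store below is dead:
  //   store i32 %a, i32* %p
  //   %t = shl i32 %x, 3
  //   store i32 %b, i32* %p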
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (!PrevSI->isVolatile() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is the
    // value being stored and it is loaded from the same pointer, then *this*
    // store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          !SI.isVolatile())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }


  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;


  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
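/// For example (illustrative IR for the if/then/else case):
///   then:
///     store i32 1, i32* %p
///     br label %join
///   else:
///     store i32 2, i32* %p
///     br label %join
/// becomes:
///   join:
///     %storemerge = phi i32 [ 1, %then ], [ 2, %else ]
///     store i32 %storemerge, i32* %p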
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = 0;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  There is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or if the
    // alignments differ, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        OtherStore->getAlignment() != SI.getAlignment())
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            OtherStore->getAlignment() != SI.getAlignment())
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstNonPHI();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   OtherStore->isVolatile(),
                                   SI.getAlignment());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}