//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
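/// For example (illustrative), given
///   @G = constant [4 x i8] c"abcd"
/// both @G itself and the constant expression
///   getelementptr inbounds ([4 x i8]* @G, i64 0, i64 2)
/// point into constant global memory, so this returns true for either.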
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer; return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffset
/// pointer to the alloca, and if the source pointer is a pointer to a
/// constant global, we can optimize this.
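/// A sketch of the pattern this recognizes (illustrative IR; the names and
/// the exact memcpy intrinsic signature here are hypothetical):
///   %A = alloca [16 x i8]
///   %p = bitcast [16 x i8]* %A to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p,
///       i8* bitcast ([16 x i8]* @G to i8*), i64 16, i32 1, i1 false)
///   ; ...followed only by loads of %A...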
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer;
        // otherwise, it does.
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }

      if (CallSite CS = I) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        // Inalloca arguments are clobbered by the call.
        unsigned ArgNo = CS.getArgumentNo(&U);
        if (CS.isInAllocaArgument(ArgNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just
        // a load (but one that potentially returns the value itself), so we
        // can ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (CS.isByValArgument(ArgNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the sole copy (a memcpy/memmove)
/// if the specified alloca is only modified by such a copy from a constant
/// global, and null otherwise.  If we can prove this, we can replace any uses
/// of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
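  // For example (illustrative), on a target with 64-bit pointers:
  //   %a = alloca i32, i8 %n   -->   %0 = zext i8 %n to i64
  //                                  %a = alloca i32, i64 %0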
  Type *IntPtrTy = DL.getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
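  // For example (illustrative):
  //   %a = alloca i32, i64 4   -->   %a1 = alloca [4 x i32]
  //                                  %a1.sub = getelementptr inbounds
  //                                      [4 x i32]* %a1, i64 0, i64 0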
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
          ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block
      // of allocas if possible; also skip interleaved debug info.
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the
      // block, insert our getelementptr instruction...
      //
      Type *IdxTy = DL.getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = { NullIdx, NullIdx };
      Instruction *GEP =
          GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array
      // allocation.  This is helpful if the array size is a complicated
      // expression not used elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier
        // already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global whose alignment is equal to or exceeds that of
    // the allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be
/// the loaded *value* type. This will convert it to a pointer, cast the
/// operand to that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
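/// For example (illustrative), retyping "%v = load i32* %p" to float yields:
///   %0 = bitcast i32* %p to float*
///   %v = load float* %0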
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy) {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.getName());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its
    // type*.  The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes.  This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct.  If you are adding metadata to
    // LLVM which pertains to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;

    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.  If the new type is a pointer, we could
      // translate it to !nonnull metadata.
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
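/// For example (illustrative), storing a float %v through an i32* pointer
/// yields:
///   %0 = bitcast i32* %p to float*
///   store float %v, float* %0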
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*.  The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes.  This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct.  If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of value their uses expect,
/// looking through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an
/// operation, we should load the type most conducive to that operation. For
/// example, when loading an integer and converting that immediately to a
/// pointer, we should instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load. This is debatable, and might be reasonable to
/// change later. However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
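/// For example (illustrative), a load whose sole use is a bitcast:
///   %x = load i32* %p
///   %f = bitcast i32 %x to float
/// is rewritten to load the desired type directly:
///   %0 = bitcast i32* %p to float*
///   %f = load float* %0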
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty)) {
    if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI;
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.EraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  if (LI.hasOneUse())
    if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, BC->getDestTy());
      BC->replaceAllUsesWith(NewLoad);
      IC.EraseInstFromFunction(*BC);
      return &LI;
    }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less
// than or equal to the provided maximum size, then return true. Otherwise,
// return false (in practice, only constant globals and constant-sized
// allocas can be proven to qualify).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->mayBeOverridden())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize,
    // continue searching.  Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getType()->getElementType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// also search through non-zero constant indices if we kept track of the
// offsets those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getOperand(0)->getType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero,
  // make sure they're all non-negative. If any of them are negative, the
  // overall address being computed might be before the base address
  // determined by the first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer
  // is also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return nullptr;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value
/// being stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
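/// For example (illustrative):
///   %c = bitcast float %v to i32
///   store i32 %c, i32* %p
/// becomes a store of the original float value:
///   %0 = bitcast i32* %p to float*
///   store float %v, float* %0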
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    combineStoreToNewValue(IC, SI, V);
    return true;
  }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value.  This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return EraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
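  // For example (illustrative):
  //   store i32 %v1, i32* %p
  //   %v2 = or i32 %v1, 4
  //   store i32 %v2, i32* %p   ; the first store is dead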
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is
    // from the pointer we're loading and is producing the pointer we're
    // storing, then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move the store to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
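/// A sketch of the result for the if/then/else form (illustrative; block
/// names are hypothetical):
///   DestBB:
///     %storemerge = phi i32 [ %v1, %ThenBB ], [ %v2, %ElseBB ]
///     store i32 %storemerge, i32* %P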
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if
  // then else' case.  There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not
    // the right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of
    // the destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to make sure
    // nothing reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}