//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
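///
/// For example (an illustrative sketch, not code from this file), it returns
/// true for the constant GEP expression feeding this load:
///   @arr = internal constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   %v = load i32* getelementptr inbounds ([4 x i32]* @arr, i64 0, i64 2)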
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets a pointer to the start of
/// the alloca (no offset), and if the source pointer points to a constant
/// global, we can optimize this.
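///
/// A minimal sketch of the pattern this recognizes (hypothetical IR):
///   @cdata = private unnamed_addr constant [32 x i8] c"..."
///   %buf = alloca [32 x i8]
///   %p = getelementptr inbounds [32 x i8]* %buf, i64 0, i64 0
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p,
///     i8* getelementptr inbounds ([32 x i8]* @cdata, i64 0, i64 0),
///     i64 32, i32 1, i1 false)
///   ; ... followed only by reads of %buf ...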
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Simple (non-volatile, non-atomic) loads are always ok; ignore them.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // A GEP with all zero indices leaves the pointer unchanged; any
        // non-zero index offsets it.
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }

      if (CallSite CS = I) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        // Inalloca arguments are clobbered by the call.
        unsigned ArgNo = CS.getArgumentNo(&U);
        if (CS.isInAllocaArgument(ArgNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (CS.isByValArgument(ArgNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - If the specified alloca is only modified
/// by a copy from a constant global, return that copying instruction; we can
/// then replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (DL) {
    Type *IntPtrTy = DL->getIntPtrType(AI.getType());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
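  //
  // An illustrative sketch of the rewrite:
  //   %a = alloca i32, i64 4
  // becomes
  //   %a1 = alloca [4 x i32]
  //   %a1.sub = getelementptr inbounds [4 x i32]* %a1, i64 0, i64 0
  // with all users of the original alloca rewritten to use %a1.sub.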
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
          ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block
      // of allocas if possible... also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = DL
                  ? DL->getIntPtrType(AI.getType())
                  : Type::getInt64Ty(AI.getContext());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = { NullIdx, NullIdx };
      Instruction *GEP =
          GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (DL && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
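    //
    // A hypothetical illustration: given
    //   %t0 = alloca {}          ; first alloca in the entry block
    //   %t1 = alloca [0 x i32]   ; some other zero-sized alloca
    // %t1 may share %t0's address, so it is replaced by a bitcast of %t0.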
    if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global whose alignment is equal to or exceeds that of
    // the allocation. If this is the case, we can change all users to use the
    // constant global instead. This is commonly produced by the CFE for
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, AC, &AI, DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
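///
/// For example (an illustrative sketch), rewriting a load of an i64 to load a
/// double instead:
///   %v = load i64* %p
/// becomes
///   %0 = bitcast i64* %p to double*
///   %v = load double* %0
/// with the original load's metadata copied over where it still applies.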
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy) {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.getName());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // FIXME: We should translate this into range metadata for integer types
      // and vice versa.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of value their uses expect, after
/// looking through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load. This is debatable, and might be reasonable to
/// change later. However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
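///
/// A before/after sketch (hypothetical IR) of the bitcast case handled below:
///   %x = load i32* %p
///   %f = bitcast i32 %x to float
/// becomes
///   %0 = bitcast i32* %p to float*
///   %f = load float* %0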
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  Type *Ty = LI.getType();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
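  //
  // For instance (illustrative only), on a target where i32 is legal:
  //   %v = load float* %p
  //   store float %v, float* %q
  // is rewritten to load and store i32 through bitcast pointers instead.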
  const DataLayout *DL = IC.getDataLayout();
  if (!Ty->isIntegerTy() && Ty->isSized() && DL &&
      DL->isLegalInteger(DL->getTypeStoreSizeInBits(Ty)) &&
      DL->getTypeStoreSizeInBits(Ty) == DL->getTypeSizeInBits(Ty)) {
    if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI;
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL->getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.EraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  if (LI.hasOneUse())
    if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, BC->getDestTy());
      BC->replaceAllUsesWith(NewLoad);
      IC.EraseInstFromFunction(*BC);
      return &LI;
    }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. Constant global values and allocas are the kinds of objects whose
// size we can currently determine this way.
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout *DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->mayBeOverridden())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL->getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL->getTypeAllocSize(GV->getType()->getElementType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  const DataLayout *DL = IC.getDataLayout();
  if (GEPI->getNumOperands() < 2 || !DL)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getOperand(0)->getType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  uint64_t TyAllocSize = DL->getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign = getOrEnforceKnownAlignment(
        Op, DL->getPrefTypeAlignment(LI.getType()), DL, AC, &LI, DT);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign =
        LoadAlign != 0 ? LoadAlign : DL->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return nullptr;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
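  //
  // An illustrative sketch of the forwarding (hypothetical IR):
  //   store i32 %v, i32* %p
  //   %y = add i32 %x, 1        ; unrelated arithmetic
  //   %w = load i32* %p         ; all uses of %w become %v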
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal =
          FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable. We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable. We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
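///
/// For example (a hypothetical sketch):
///   %f = bitcast i32 %x to float
///   store float %f, float* %p
/// becomes a store of the original value:
///   %0 = bitcast float* %p to i32*
///   store i32 %x, i32* %0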
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    combineStoreToNewValue(IC, SI, V);
    return true;
  }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return EraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign = getOrEnforceKnownAlignment(
        Ptr, DL->getPrefTypeAlignment(Val->getType()), DL, AC, &SI, DT);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign =
        StoreAlign != 0 ? StoreAlign : DL->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
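  //
  // A hypothetical illustration of the pattern:
  //   store i32 %a, i32* %p     ; dead, killed by the store below
  //   %b = or i32 %a, 64
  //   store i32 %b, i32* %p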
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
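/// A sketch of the first case in hypothetical IR: after the transform, the
/// successor block contains
///   %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///   store i32 %storemerge, i32* %P
/// and the two original stores are removed.
///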
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
959 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
Jim Grosbachbdbd7342013-04-05 21:20:12 +0000960
Chris Lattnera65e2f72010-01-05 05:57:49 +0000961 // Determine whether Dest has exactly two predecessors and, if so, compute
962 // the other predecessor.
963 pred_iterator PI = pred_begin(DestBB);
Gabor Greif1b787df2010-07-12 15:48:26 +0000964 BasicBlock *P = *PI;
Craig Topperf40110f2014-04-25 05:29:35 +0000965 BasicBlock *OtherBB = nullptr;
Gabor Greif1b787df2010-07-12 15:48:26 +0000966
967 if (P != StoreBB)
968 OtherBB = P;
969
970 if (++PI == pred_end(DestBB))
Chris Lattnera65e2f72010-01-05 05:57:49 +0000971 return false;
Jim Grosbachbdbd7342013-04-05 21:20:12 +0000972
Gabor Greif1b787df2010-07-12 15:48:26 +0000973 P = *PI;
974 if (P != StoreBB) {
Chris Lattnera65e2f72010-01-05 05:57:49 +0000975 if (OtherBB)
976 return false;
Gabor Greif1b787df2010-07-12 15:48:26 +0000977 OtherBB = P;
Chris Lattnera65e2f72010-01-05 05:57:49 +0000978 }
979 if (++PI != pred_end(DestBB))
980 return false;
981
  // Bail out if the relevant blocks aren't all distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}