//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore simple loads -- they are always ok; reject volatile and
        // atomic loads.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer;
        // otherwise it does.
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }

      if (CallSite CS = I) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        // Inalloca arguments are clobbered by the call.
        unsigned ArgNo = CS.getArgumentNo(&U);
        if (CS.isInAllocaArgument(ArgNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (CS.isByValArgument(ArgNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
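
// Illustrative example (not from the source): the pattern recognized above is
// an alloca whose only write is a memcpy/memmove from a constant global, e.g.
//   %A = alloca [4 x i32]
//   %p = bitcast [4 x i32]* %A to i8*
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p,
//       i8* bitcast ([4 x i32]* @G to i8*), i64 16, i32 4, i1 false)
//   ... only reads of %A ...
// With @G constant, every read of %A can then be redirected to @G itself.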

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove if the specified
/// alloca is only modified by a copy from a constant global; otherwise return
/// null.  If we can prove this, we can replace any uses of the alloca with
/// uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation())
    return nullptr;

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible; also skip interleaved debug info.
    BasicBlock::iterator It = New;
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.ReplaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  return nullptr;
}
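
// A hedged sketch of the constant-array-size rewrite above (IR names are
// illustrative only):
//   %V = alloca i32, i64 4
// becomes
//   %V1 = alloca [4 x i32]
//   %V1.sub = getelementptr inbounds [4 x i32]* %V1, i64 0, i64 0
// and all uses of %V are replaced with %V1.sub.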

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero-sized alloca there is no point in doing an array
      // allocation.  This is helpful if the array size is a complicated
      // expression not used elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier
        // already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global whose alignment is equal to or exceeds that of
    // the allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}
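
// Sketch of the zero-sized-alloca merging above (illustrative, not from the
// source): given two zero-sized allocas in the entry block,
//   %a = alloca [0 x i32], align 8
//   %b = alloca [0 x i64], align 16
// %b is folded into %a: %a's alignment is raised to 16 and uses of %b become
// a bitcast of %a.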

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy) {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.getName());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its
    // type*.  The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes.  This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct.  If you are adding metadata to
    // LLVM which pertains to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;

    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.  If the new type is a pointer, we could
      // translate it to !nonnull metadata.
      break;
    }
  }
  return NewLoad;
}
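
// A hedged illustration of the rewrite above (names invented): with
// NewTy == float,
//   %x = load i32* %p, align 4, !tbaa !0
// becomes
//   %0 = bitcast i32* %p to float*
//   %x = load float* %0, align 4, !tbaa !0
// and a !nonnull tag on a pointer load becomes roughly !range !{i64 1, i64 0}
// (i.e. "nonzero") when the new type is an integer.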

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*.  The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes.  This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct.  If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load. This is debatable, and might be reasonable to
/// change later. However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty)) {
    if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI;
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.EraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  if (LI.hasOneUse())
    if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, BC->getDestTy());
      BC->replaceAllUsesWith(NewLoad);
      IC.EraseInstFromFunction(*BC);
      return &LI;
    }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}
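
// Hypothetical before/after for the load canonicalization above: a float
// load whose only uses are stores becomes an integer load and stores of the
// same width, e.g.
//   %f = load float* %p
//   store float %f, float* %q
// becomes
//   %0  = bitcast float* %p to i32*
//   %f1 = load i32* %0
//   %1  = bitcast float* %q to i32*
//   store i32 %f1, i32* %1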

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true (constant global
// values and allocas fall into this category).  Otherwise, return false.
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->mayBeOverridden())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching.  Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getType()->getElementType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}
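
// Hypothetical illustration of the walk above: for
//   %p = select i1 %c, [2 x i32]* %a, [2 x i32]* @g
// it returns true with MaxSize >= 8 when %a is a fixed-size alloca and @g is
// a constant global with a definitive initializer; any object it cannot bound
// (e.g. a plain function argument) makes it return false.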

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// also search through non-zero constant indices if we kept track of the
// offsets those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getOperand(0)->getType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return nullptr;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) &&
        GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}
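
// Sketch of the load-of-select rewrite above (illustrative IR):
//   %p = select i1 %c, i32* %a, i32* %b
//   %v = load i32* %p
// becomes, when both %a and %b are safe to load from unconditionally,
//   %a.val = load i32* %a
//   %b.val = load i32* %b
//   %v     = select i1 %c, i32 %a.val, i32 %b.val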

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    combineStoreToNewValue(IC, SI, V);
    return true;
  }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
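
// Hypothetical before/after for the bitcast-store fold above:
//   %b = bitcast float %f to i32
//   store i32 %b, i32* %p
// becomes
//   %0 = bitcast i32* %p to float*
//   store float %f, float* %0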

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value.  This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return EraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is
    // from the pointer we're storing to, and it is the value being stored,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}
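
// e.g. the simple DSE above removes the first store in
//   store i32 1, i32* %p
//   store i32 2, i32* %p
// when nothing between them may read or write memory (and the second store
// is found within the short scan window used above).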

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of
    // the destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure
    // nothing reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}
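
// Illustrative sketch of the if/then/else case handled above:
//   then:                     else:
//     store i32 1, i32* %p      store i32 2, i32* %p
//     br label %merge           br label %merge
// becomes, in %merge:
//   %storemerge = phi i32 [ 1, %then ], [ 2, %else ]
//   store i32 %storemerge, i32* %p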