//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffset pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
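///
/// An illustrative IR sketch of the pattern (names are made up, not from any
/// actual test):
///   @g = private constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   %a = alloca [4 x i32]
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a.cast, i8* %g.cast,
///                                        i64 16, i32 4, i1 false)
///   ; ... only reads of %a follow ...
/// Here every use of %a can be rewritten to use @g directly.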
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we have already seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove if the specified
/// alloca is only modified by a copy from a constant global.  If we can prove
/// this, we can replace any uses of the alloca with uses of the global
/// directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder->getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
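  // An illustrative sketch of the rewrite (value names are made up):
  //   %a = alloca i32, i32 4
  // becomes
  //   %1 = alloca [4 x i32]
  //   %a.sub = getelementptr inbounds [4 x i32], [4 x i32]* %1, i64 0, i64 0
  // and all uses of %a are pointed at %a.sub.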
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible...also skip interleaved debug info
    //
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    //
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = replaceInstUsesWith(AI, Cast);
        eraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
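///
/// An illustrative IR sketch (names are made up): combining a float load to
/// i32 rewrites
///   %v = load float, float* %p
/// into
///   %p.cast = bitcast float* %p to i32*
///   %v = load i32, i32* %p.cast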
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
                                      const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.

      // If it's a pointer now and the range does not contain 0, make it !nonnull.
      if (NewTy->isPointerTy()) {
        unsigned BitWidth = IC.getDataLayout().getTypeSizeInBits(NewTy);
        if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
          MDNode *NN = MDNode::get(LI.getContext(), None);
          NewLoad->setMetadata(LLVMContext::MD_nonnull, NN);
        }
      }
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
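///
/// An illustrative IR sketch (names are made up): storing float %f through an
/// i32* pointer emits
///   %p.cast = bitcast i32* %p to float*
///   store float %f, float* %p.cast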
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
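///
/// An illustrative IR sketch (names are made up), on a target with 64-bit
/// pointers:
///   %x = load i64, i64* %p
///   %y = inttoptr i64 %x to i8*
/// is better modeled by loading the pointer type directly:
///   %p.cast = bitcast i64* %p to i8**
///   %y = load i8*, i8** %p.cast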
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
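  //
  // For example (an illustrative sketch; the pointer casts are elided):
  //   %v = load float, float* %src
  //   store float %v, float* %dst
  // becomes an integer load/store pair, treating the value as raw bits:
  //   %v = load i32, i32* %src.cast
  //   store i32 %v, i32* %dst.cast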
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

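// Unpack a load of a small aggregate into loads of its individual elements.
// An illustrative IR sketch (names are made up):
//   %s = load { i32, i32 }, { i32, i32 }* %p
// becomes, roughly:
//   %s.elt = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0
//   %s.unpack = load i32, i32* %s.elt
//   ... a second GEP/load for element 1, plus insertvalues rebuilding %s.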
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > 1024)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                              Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas are examples of objects whose
// size can be determined this way).
//
// FIXME: This should probably live in ValueTracking (or similar).
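//
// For example (an illustrative sketch): given "@g = constant [4 x i32] ...",
// isObjectSizeLessThanOrEq(@g, 16, DL) is true on a target where i32 is
// 4 bytes, since the underlying object is exactly 16 bytes.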
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
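  //
  // For example (an illustrative sketch):
  //   store i32 %v, i32* %p
  //   %x = add i32 %a, %b        ; unrelated work
  //   %w = load i32, i32* %p     ; %w is replaced by %v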
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);

    return replaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads.  Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.eraseInstFromFunction returns a null pointer.
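///
/// An illustrative IR sketch (names are made up):
///   %c = bitcast float %f to i32
///   store i32 %c, i32* %p
/// is rewritten to store the float directly:
///   %p.cast = bitcast i32* %p to float*
///   store float %f, float* %p.cast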
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

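// Unpack a store of a small aggregate into stores of its individual elements.
// An illustrative IR sketch (names are made up):
//   store { i32, i32 } %v, { i32, i32 }* %p
// becomes, roughly, an extractvalue/GEP/store triple per element:
//   %v.elt = extractvalue { i32, i32 } %v, 0
//   %p.repack = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p,
//               i32 0, i32 0
//   store i32 %v.elt, i32* %p.repack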
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break stores of padded structs here, as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;
1075
Amaury Sechet61a7d622016-02-17 19:21:28 +00001076 auto Align = SI.getAlignment();
1077 if (!Align)
1078 Align = DL.getABITypeAlignment(ST);
1079
NAKAMURA Takumiec6b1fc2015-12-15 09:37:31 +00001080 SmallString<16> EltName = V->getName();
1081 EltName += ".elt";
Mehdi Amini1c131b32015-12-15 01:44:07 +00001082 auto *Addr = SI.getPointerOperand();
NAKAMURA Takumiec6b1fc2015-12-15 09:37:31 +00001083 SmallString<16> AddrName = Addr->getName();
1084 AddrName += ".repack";
Amaury Sechet61a7d622016-02-17 19:21:28 +00001085
Mehdi Amini1c131b32015-12-15 01:44:07 +00001086 auto *IdxType = Type::getInt32Ty(ST->getContext());
1087 auto *Zero = ConstantInt::get(IdxType, 0);
1088 for (unsigned i = 0; i < Count; i++) {
1089 Value *Indices[2] = {
1090 Zero,
1091 ConstantInt::get(IdxType, i),
1092 };
Amaury Sechetda71cb72016-02-17 21:21:29 +00001093 auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
1094 AddrName);
Mehdi Amini1c131b32015-12-15 01:44:07 +00001095 auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
Amaury Sechet61a7d622016-02-17 19:21:28 +00001096 auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
1097 IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
Mehdi Amini1c131b32015-12-15 01:44:07 +00001098 }
1099
1100 return true;
Mehdi Aminib344ac92015-03-14 22:19:33 +00001101 }
1102
David Majnemer75364602015-05-11 05:04:27 +00001103 if (auto *AT = dyn_cast<ArrayType>(T)) {
1104 // If the array only have one element, we unpack.
Amaury Sechet3b8b2ea2016-03-02 22:36:45 +00001105 auto NumElements = AT->getNumElements();
1106 if (NumElements == 1) {
David Majnemer75364602015-05-11 05:04:27 +00001107 V = IC.Builder->CreateExtractValue(V, 0);
1108 combineStoreToNewValue(IC, SI, V);
1109 return true;
1110 }
Amaury Sechet3b8b2ea2016-03-02 22:36:45 +00001111
Davide Italianof6988d22016-10-07 21:53:09 +00001112 // Bail out if the array is too large. Ideally we would like to optimize
1113 // arrays of arbitrary size but this has a terrible impact on compile time.
1114 // The threshold here is chosen arbitrarily, maybe needs a little bit of
1115 // tuning.
1116 if (NumElements > 1024)
1117 return false;
1118
Amaury Sechet3b8b2ea2016-03-02 22:36:45 +00001119 const DataLayout &DL = IC.getDataLayout();
1120 auto EltSize = DL.getTypeAllocSize(AT->getElementType());
1121 auto Align = SI.getAlignment();
1122 if (!Align)
1123 Align = DL.getABITypeAlignment(T);
1124
1125 SmallString<16> EltName = V->getName();
1126 EltName += ".elt";
1127 auto *Addr = SI.getPointerOperand();
1128 SmallString<16> AddrName = Addr->getName();
1129 AddrName += ".repack";
1130
1131 auto *IdxType = Type::getInt64Ty(T->getContext());
1132 auto *Zero = ConstantInt::get(IdxType, 0);
1133
1134 uint64_t Offset = 0;
1135 for (uint64_t i = 0; i < NumElements; i++) {
1136 Value *Indices[2] = {
1137 Zero,
1138 ConstantInt::get(IdxType, i),
1139 };
1140 auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
1141 AddrName);
1142 auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
1143 auto EltAlign = MinAlign(Align, Offset);
1144 IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
1145 Offset += EltSize;
1146 }
1147
1148 return true;
David Majnemer75364602015-05-11 05:04:27 +00001149 }
1150
Mehdi Aminib344ac92015-03-14 22:19:33 +00001151 return false;
1152}
1153
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr @a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr @a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
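  // For example, a store marked `align 4` through a pointer that is known
  // (say, from an alloca's alignment) to be 16-byte aligned can be upgraded
  // to `align 16`.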
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to unpack an aggregate store into stores of its elements.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
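  // An illustrative sketch of what replaceGEPIdxWithZero handles: a variable
  // index into a single-element array can only validly be zero, so
  //   getelementptr inbounds [1 x i32], [1 x i32]* %a, i64 0, i64 %i
  // can be rewritten with a constant zero as the last index.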
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the pointer operand is an alloca with a single use, zap the store;
  // this makes the alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
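  // An illustrative sketch (value names are hypothetical):
  //   store i32 %x, i32* %p
  //   %y = or i32 %x, 1
  //   store i32 %y, i32* %p   ; the first store is now dead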
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile or ordered, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the value being stored
    // is the value loaded from the same pointer, then *this* store is dead
    // (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
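  // An illustrative sketch (names are hypothetical): in address space 0,
  //   store i32 %x, i32* null
  // is undefined behavior, so we only replace the stored value with undef
  // here and let SimplifyCFG turn the store into an 'unreachable'.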
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
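/// For the if/then/else case this produces (an illustrative sketch; the
/// incoming value names are hypothetical):
///   Dest:
///     %storemerge = phi i32 [ %v1, %Then ], [ %v2, %Else ]
///     store i32 %storemerge, i32* %P
///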
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");

  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ends with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure
    // nothing reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}