//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
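///
/// Illustrative example (hypothetical IR, not taken from a test): given
///   @G = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
/// the constant expression
///   getelementptr ([4 x i32], [4 x i32]* @G, i64 0, i64 2)
/// points to part of the constant global @G, so this returns true.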
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer; return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses. If we see a memcpy/memmove that targets an unoffsetted
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
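///
/// A sketch of the pattern this recognizes (pseudo-IR, pointer casts omitted):
///   %A = alloca [16 x i8]
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* @G, i64 16, i32 4,
///                                        i1 false)
///   ; ...only reads of %A afterwards...
/// Every read of %A can then be rewritten to read the constant global @G
/// directly, making the alloca and the copy dead.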
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer;
        // otherwise it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we have already seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove if the specified
/// alloca is only modified by a copy from a constant global, and null
/// otherwise. If we can prove this, we can replace any uses of the alloca
/// with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder->getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
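  //
  // For instance (a hypothetical before/after pair):
  //   %V = alloca i32, i32 4
  // becomes
  //   %V1 = alloca [4 x i32]
  //   %V1.sub = getelementptr inbounds [4 x i32], [4 x i32]* %V1, i64 0, i64 0
  // with all users of %V rewritten to use %V1.sub.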
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible...also skip interleaved debug info
    //
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    //
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = replaceInstUsesWith(AI, Cast);
        eraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
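///
/// For example (a sketch), rewriting "%L = load i32, i32* %p" with
/// \c NewTy == float produces:
///   %0 = bitcast i32* %p to float*
///   %L = load float, float* %0
/// with the original load's metadata carried over where it still applies.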
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
                                      const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.

      // If it's a pointer now and the range does not contain 0, make it !nonnull.
      if (NewTy->isPointerTy()) {
        unsigned BitWidth = IC.getDataLayout().getTypeSizeInBits(NewTy);
        if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
          MDNode *NN = MDNode::get(LI.getContext(), None);
          NewLoad->setMetadata(LLVMContext::MD_nonnull, NN);
        }
      }
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
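///
/// A sketch of the kind of rewrite this enables (hypothetical IR):
///   %L = load i32, i32* %p
///   %F = bitcast i32 %L to float
/// becomes a direct load of the same width:
///   %0 = bitcast i32* %p to float*
///   %L = load float, float* %0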
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably, with some care, handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
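  //
  // For instance (hypothetically), a "load float" whose only users are
  // "store float" instructions can be rewritten as an i32 load feeding i32
  // stores, since float and i32 have the same store size.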
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto* CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

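// Split a load of an aggregate (struct or array) into loads of its elements,
// reassembled with insertvalue. A minimal sketch of the struct case
// (hypothetical IR):
//   %S = load { i32, i64 }, { i32, i64 }* %p
// becomes, roughly,
//   %S.elt = getelementptr inbounds { i32, i64 }, { i32, i64 }* %p, i32 0, i32 0
//   %S.unpack = load i32, i32* %S.elt
//   ...an analogous load of the i64 element, then two insertvalues into %S...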
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably, with some care, handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily and may need a little bit of
    // tuning.
    if (NumElements > 1024)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                              Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true (constant global
// values and allocas fall into this category). Otherwise, return false.
//
// FIXME: This should probably live in ValueTracking (or similar).
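//
// For example, given "%A = alloca [4 x i32]" and MaxSize >= 16, this returns
// true (assuming a DataLayout where i32 occupies 4 bytes).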
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as an
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
        ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);

    return replaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable. We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable. We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Look for an extractelement/insertvalue sequence that acts like a
/// bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.eraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably, with some care, handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

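// Split a store of an aggregate into stores of its elements. A minimal sketch
// of the struct case (hypothetical IR):
//   store { i32, i64 } %agg, { i32, i64 }* %p
// becomes, roughly, one extractvalue plus one aligned store per element,
// through inbounds GEPs into %p.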
Mehdi Aminib344ac92015-03-14 22:19:33 +00001049static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
1050 // FIXME: We could probably with some care handle both volatile and atomic
1051 // stores here but it isn't clear that this is important.
1052 if (!SI.isSimple())
1053 return false;
1054
1055 Value *V = SI.getValueOperand();
1056 Type *T = V->getType();
1057
1058 if (!T->isAggregateType())
1059 return false;
1060
  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break up stores with padding here, as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
    }

    return true;
  }
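
  // Illustration of the struct path above, with hypothetical IR: given a
  // padding-free struct,
  //   store { i32, i32 } %v, { i32, i32 }* %p
  // is repacked into one store per element:
  //   %v.elt = extractvalue { i32, i32 } %v, 0
  //   %p.repack = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p,
  //                                      i32 0, i32 0
  //   store i32 %v.elt, i32* %p.repack
  // and likewise for element 1, with each element store's alignment derived
  // via MinAlign from the original alignment and the element's offset.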

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size, but this has a terrible impact on compile
    // time. The threshold here is chosen arbitrarily and may need tuning.
    if (NumElements > 1024)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
      Offset += EltSize;
    }

    return true;
  }
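
  // The array path mirrors the struct path, e.g. (hypothetical IR):
  //   store [2 x i64] %v, [2 x i64]* %p
  // becomes an extractvalue/getelementptr/store triple per element, with the
  // running byte offset (Offset above) feeding MinAlign for each element.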

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to break up stores of aggregates into stores of their elements.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
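  // A sketch of what this catches (hypothetical IR):
  //   store i32 %a, i32* %p
  //   %x = add i32 %a, 1        ; a few intervening non-memory ops are fine
  //   store i32 %x, i32* %p     ; makes the first store dead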
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile or ordered, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is the
    // value we're storing and was loaded from the pointer we're storing to,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U); // Dropped a use.
    }
    return nullptr; // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move the store to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr; // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
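/// For the if/then/else case the result looks roughly like this
/// (hypothetical IR):
///   Dest:
///     %storemerge = phi i32 [ %v1, %Then ], [ %v2, %Else ]
///     store i32 %storemerge, i32* %P
///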
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");

  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  // The debug locations of the original instructions might differ; merge them.
  NewSI->setDebugLoc(DILocation::getMergedLocation(SI.getDebugLoc(),
                                                   OtherStore->getDebugLoc()));

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}