//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses. If we see a memcpy/memmove that targets a pointer to the
/// start of the alloca (no offset), and the source pointer is a pointer to a
/// constant global, we can optimize this.
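///
/// An illustrative example (hand-written, not from the test suite) of a use
/// chain this accepts, with @G a constant global:
///
///   %A = alloca [16 x i8]
///   %p = getelementptr inbounds [16 x i8], [16 x i8]* %A, i64 0, i64 0
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p, i8* getelementptr inbounds
///     ([16 x i8], [16 x i8]* @G, i64 0, i64 0), i64 16, i32 1, i1 false)
///   ; ... only reads of %A / %p after this point ...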
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads; they are always OK.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are OK, we are OK.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove instruction if
/// the specified alloca is only modified by a copy from a constant global;
/// otherwise return null. If we can prove this, we can replace any uses of
/// the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for the size of the alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
                                            APInt(64, AllocaSize), DL);
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder->getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible...also skip interleaved debug info
    //
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    //
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, we cannot use
// replaceAllUsesWith, since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
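//
// An illustrative example (hand-written, not from the test suite): if an
// alloca in addrspace(0) is replaced by a global @G in addrspace(2), the
// chain
//
//   %b = bitcast [4 x i32]* %A to i32*
//   %v = load i32, i32* %b
//
// is rewritten as
//
//   %b1 = bitcast [4 x i32] addrspace(2)* @G to i32 addrspace(2)*
//   %v = load i32, i32 addrspace(2)* %b1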
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        } else {
          PointerReplacer PtrReplacer(*this);
          PtrReplacer.replacePointer(AI, Cast);
          ++NumGlobalCopies;
        }
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
                                      const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.

      // If it's a pointer now and the range does not contain 0, make it !nonnull.
      if (NewTy->isPointerTy()) {
        unsigned BitWidth = IC.getDataLayout().getTypeSizeInBits(NewTy);
        if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
          MDNode *NN = MDNode::get(LI.getContext(), None);
          NewLoad->setMetadata(LLVMContext::MD_nonnull, NN);
        }
      }
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
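///
/// An illustrative example (hand-written, not from the test suite) of the
/// single-use noop-cast fold below:
///
///   %x = load i32, i32* %p
///   %f = bitcast i32 %x to float
///
/// becomes a direct load of the desired type:
///
///   %p.cast = bitcast i32* %p to float*
///   %f = load float, float* %p.cast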
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type has the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

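// Unpack loads of aggregates (structs and small arrays) into loads of their
// elements, recombined with insertvalue. An illustrative example (hand-written
// IR with approximate value names, not from the test suite): a load of
// { i32, i64 } from %p becomes
//
//   %s.elt = getelementptr inbounds { i32, i64 }, { i32, i64 }* %p, i32 0, i32 0
//   %s.unpack = load i32, i32* %s.elt
//   %s.elt1 = getelementptr inbounds { i32, i64 }, { i32, i64 }* %p, i32 0, i32 1
//   %s.unpack2 = load i64, i64* %s.elt1
//   %s3 = insertvalue { i32, i64 } undef, i32 %s.unpack, 0
//   %s = insertvalue { i32, i64 } %s3, i64 %s.unpack2, 1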
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
          UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
          UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                              Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas fall into this category).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128) * APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

844// not a constant, but having any value but zero would lead to undefined
845// behavior, replace it with zero.
846//
847// For example, if we have:
848// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
849// ...
850// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
851// ... = load i32* %arrayidx, align 4
852// Then we know that we can replace %x in the GEP with i64 0.
853//
854// FIXME: We could fold any GEP index to zero that would cause UB if it were
855// not zero. Currently, we only handle the first such index. Also, we could
856// also search through non-zero constant indices if we kept track of the
857// offsets those indices implied.
858static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
859 Instruction *MemI, unsigned &Idx) {
Mehdi Aminia28d91d2015-03-10 02:37:25 +0000860 if (GEPI->getNumOperands() < 2)
Hal Finkel847e05f2015-02-20 03:05:53 +0000861 return false;
862
863 // Find the first non-zero index of a GEP. If all indices are zero, return
864 // one past the last index.
865 auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
866 unsigned I = 1;
867 for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
868 Value *V = GEPI->getOperand(I);
869 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
870 if (CI->isZero())
871 continue;
872
873 break;
874 }
875
876 return I;
877 };
878
879 // Skip through initial 'zero' indices, and find the corresponding pointer
880 // type. See if the next index is not a constant.
881 Idx = FirstNZIdx(GEPI);
882 if (Idx == GEPI->getNumOperands())
883 return false;
884 if (isa<Constant>(GEPI->getOperand(Idx)))
885 return false;
886
887 SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
Eduard Burtescu19eb0312016-01-19 17:28:00 +0000888 Type *AllocTy =
889 GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
Hal Finkel847e05f2015-02-20 03:05:53 +0000890 if (!AllocTy || !AllocTy->isSized())
891 return false;
Mehdi Aminia28d91d2015-03-10 02:37:25 +0000892 const DataLayout &DL = IC.getDataLayout();
893 uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);
Hal Finkel847e05f2015-02-20 03:05:53 +0000894
895 // If there are more indices after the one we might replace with a zero, make
896 // sure they're all non-negative. If any of them are negative, the overall
897 // address being computed might be before the base address determined by the
898 // first non-zero index.
899 auto IsAllNonNegative = [&]() {
900 for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
Craig Topper1a36b7d2017-05-15 06:39:41 +0000901 KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
902 if (Known.isNonNegative())
Hal Finkel847e05f2015-02-20 03:05:53 +0000903 continue;
904 return false;
905 }
906
907 return true;
908 };
909
910 // FIXME: If the GEP is not inbounds, and there are extra indices after the
911 // one we'll replace, those could cause the address computation to wrap
912 // (rendering the IsAllNonNegative() check below insufficient). We can do
Bruce Mitchenere9ffb452015-09-12 01:17:08 +0000913 // better, ignoring zero indices (and other indices we can prove small
Hal Finkel847e05f2015-02-20 03:05:53 +0000914 // enough not to wrap).
915 if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
916 return false;
917
918 // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
919 // also known to be dereferenceable.
920 return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
921 IsAllNonNegative();
922}
923
924// If we're indexing into an object with a variable index for the memory
925// access, but the object has only one element, we can assume that the index
926// will always be zero. If we replace the GEP, return it.
927template <typename T>
928static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
929 T &MemI) {
930 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
931 unsigned Idx;
932 if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
933 Instruction *NewGEPI = GEPI->clone();
934 NewGEPI->setOperand(Idx,
935 ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
936 NewGEPI->insertBefore(GEPI);
937 MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
938 return NewGEPI;
939 }
940 }
941
942 return nullptr;
943}
944
static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0)
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0))
    return true;
  return false;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);

    return replaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these transforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

Chandler Carruth816d26f2014-11-25 10:09:51 +00001118/// \brief Combine stores to match the type of value being stored.
1119///
1120/// The core idea here is that the memory does not have any intrinsic type and
1121/// where we can we should match the type of a store to the type of value being
1122/// stored.
1123///
1124/// However, this routine must never change the width of a store or the number of
1125/// stores as that would introduce a semantic change. This combine is expected to
1126/// be a semantic no-op which just allows stores to more closely model the types
1127/// of their incoming values.
1128///
1129/// Currently, we also refuse to change the precise type used for an atomic or
1130/// volatile store. This is debatable, and might be reasonable to change later.
1131/// However, it is risky in case some backend or other part of LLVM is relying
1132/// on the exact type stored to select appropriate atomic operations.
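///
/// As an illustrative sketch (hand-written IR, not taken from the test
/// suite), a store of a bitcast value such as:
///
///   %f = bitcast i32 %x to float
///   store float %f, float* %p
///
/// is rewritten to store the original value through a recast pointer:
///
///   %p.cast = bitcast float* %p to i32*
///   store i32 %x, i32* %p.cast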
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably, with some care, handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably, with some care, handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break up stores with padding here, as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array has only one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to unpack an aggregate store into stores of its elements.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
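  // For instance (an illustrative sketch, not from any test):
  //   %a = alloca i32
  //   store i32 1, i32* %a   ; %a has no other uses, so the store is dead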
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
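  // A hand-written sketch of the pattern (illustrative only):
  //   store i32 %x, i32* %p
  //   %y = or i32 %x, 4      ; a few arithmetic ops, no memory access
  //   store i32 %y, i32* %p  ; makes the first store dead; the scan below
  //                          ; walks back at most 6 instructions to find it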
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the value being stored
    // is the value just loaded from the same pointer, then *this* store is
    // dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");

  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop)
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  // The debug locations of the original instructions might differ; merge them.
  NewSI->setDebugLoc(DILocation::getMergedLocation(SI.getDebugLoc(),
                                                   OtherStore->getDebugLoc()));

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}