//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer; return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset), but otherwise
/// traverse the uses. If we see a memcpy/memmove that targets an unoffset
/// pointer to the alloca, and if the source pointer points to a constant
/// global, we can optimize this.
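///
/// An illustrative sketch (hand-written IR, not from an actual test; the
/// names and the pre-LLVM-7 memcpy signature are assumptions):
///
///   @g = private constant [4 x i32] [i32 0, i32 1, i32 2, i32 3]
///   ...
///   %a = alloca [4 x i32], align 4
///   %p = bitcast [4 x i32]* %a to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p,
///            i8* bitcast ([4 x i32]* @g to i8*), i64 16, i32 4, i1 false)
///   %q = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 1
///   %v = load i32, i32* %q
///
/// Apart from the single copy, %a is only ever read, so loads through it can
/// be rewritten to read @g directly and the alloca and memcpy removed.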
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer;
        // otherwise, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove copying from a
/// constant global if the specified alloca is only modified by such a copy;
/// otherwise return null. If we can prove this, we can replace any uses of
/// the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for the size of the alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
                                            APInt(64, AllocaSize), DL);
}

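// A sketch of the array-size canonicalization performed below (hand-written
// IR, not from an actual test; names are assumptions). An alloca with a
// constant array size such as
//
//   %a = alloca i32, i32 8
//
// is rewritten to a scalar allocation of an array type plus an inbounds GEP
// to its first element:
//
//   %a = alloca [8 x i32]
//   %a.sub = getelementptr inbounds [8 x i32], [8 x i32]* %a, i64 0, i64 0
//
// Non-constant array sizes are merely cast to intptr_t so that any
// truncation or extension is exposed early.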
static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible; also skip interleaved debug info.
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
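//
// An illustrative sketch (hand-written IR, not from an actual test; the
// names and address-space numbers are assumptions). Replacing %a with @g in
//
//   %a = alloca i32                                ; addrspace(0) pointer
//   %p = bitcast i32* %a to float*
//   %v = load float, float* %p
//
// where @g is an i32 global in addrspace(2) rebuilds the chain rather than
// RAUW'ing %a:
//
//   %p1 = bitcast i32 addrspace(2)* @g to float addrspace(2)*
//   %v = load float, float addrspace(2)* %p1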
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

269void PointerReplacer::findLoadAndReplace(Instruction &I) {
270 for (auto U : I.users()) {
271 auto *Inst = dyn_cast<Instruction>(&*U);
272 if (!Inst)
273 return;
274 DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
275 if (isa<LoadInst>(Inst)) {
276 for (auto P : Path)
277 replace(P);
278 replace(Inst);
279 } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
280 Path.push_back(Inst);
281 findLoadAndReplace(*Inst);
282 Path.pop_back();
283 } else {
284 return;
285 }
286 }
287}
288
289Value *PointerReplacer::getReplacement(Value *V) {
290 auto Loc = WorkMap.find(V);
291 if (Loc != WorkMap.end())
292 return Loc->second;
293 return nullptr;
294}
295
296void PointerReplacer::replace(Instruction *I) {
297 if (getReplacement(I))
298 return;
299
300 if (auto *LT = dyn_cast<LoadInst>(I)) {
301 auto *V = getReplacement(LT->getPointerOperand());
302 assert(V && "Operand not replaced");
303 auto *NewI = new LoadInst(V);
304 NewI->takeName(LT);
305 IC.InsertNewInstWith(NewI, *LT);
306 IC.replaceInstUsesWith(*LT, NewI);
307 WorkMap[LT] = NewI;
308 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
309 auto *V = getReplacement(GEP->getPointerOperand());
310 assert(V && "Operand not replaced");
311 SmallVector<Value *, 8> Indices;
312 Indices.append(GEP->idx_begin(), GEP->idx_end());
313 auto *NewI = GetElementPtrInst::Create(
314 V->getType()->getPointerElementType(), V, Indices);
315 IC.InsertNewInstWith(NewI, *GEP);
316 NewI->takeName(GEP);
317 WorkMap[GEP] = NewI;
318 } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
319 auto *V = getReplacement(BC->getOperand(0));
320 assert(V && "Operand not replaced");
321 auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
322 V->getType()->getPointerAddressSpace());
323 auto *NewI = new BitCastInst(V, NewT);
324 IC.InsertNewInstWith(NewI, *BC);
325 NewI->takeName(BC);
Yaxun Liue6d1ce52017-02-24 20:27:25 +0000326 WorkMap[BC] = NewI;
Yaxun Liuba01ed02017-02-10 21:46:07 +0000327 } else {
328 llvm_unreachable("should never reach here");
329 }
330}
331
332void PointerReplacer::replacePointer(Instruction &I, Value *V) {
Benjamin Kramer684c87b2017-02-10 22:04:17 +0000333#ifndef NDEBUG
Yaxun Liuba01ed02017-02-10 21:46:07 +0000334 auto *PT = cast<PointerType>(I.getType());
335 auto *NT = cast<PointerType>(V->getType());
336 assert(PT != NT && PT->getElementType() == NT->getElementType() &&
337 "Invalid usage");
Benjamin Kramer684c87b2017-02-10 22:04:17 +0000338#endif
Yaxun Liuba01ed02017-02-10 21:46:07 +0000339 WorkMap[&I] = V;
340 findLoadAndReplace(I);
341}
342
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        } else {
          PointerReplacer PtrReplacer(*this);
          PtrReplacer.replacePointer(AI, Cast);
          ++NumGlobalCopies;
        }
      }
    }
  }

  // Finally, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
                                      const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
      IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      copyNonnullMetadata(LI, N, *NewLoad);
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      copyRangeMetadata(IC.getDataLayout(), LI, N, *NewLoad);
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// Returns true if the instruction represents a minmax pattern like:
/// select ((cmp load V1, load V2), V1, V2).
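///
/// An illustrative sketch (hand-written IR, not from an actual test; names
/// are assumptions). Here V1 and V2 are the pointers %p1 and %p2, and the
/// select picks the address of the smaller value:
///
///   %v1 = load i32, i32* %p1
///   %v2 = load i32, i32* %p2
///   %c = icmp slt i32 %v1, %v2
///   %ptr = select i1 %c, i32* %p1, i32* %p2
///   %min = load i32, i32* %ptr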
static bool isMinMaxWithLoads(Value *V) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
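///
/// An illustrative sketch of the no-op-cast case (hand-written IR, not from
/// an actual test; assumes a target with 64-bit pointers, so the inttoptr is
/// a no-op cast):
///
///   %i = load i64, i64* %p
///   %q = inttoptr i64 %i to i8*
///
/// is better modeled as a direct load of the pointer:
///
///   %p.cast = bitcast i64* %p to i8**
///   %q = load i8*, i8** %p.cast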
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care we could probably handle both volatile and ordered
  // atomic loads here, but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if minmax pattern is found (to avoid
  // infinite loop).
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true))) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

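// A sketch of the aggregate-load unpacking performed below (hand-written IR,
// not from an actual test; names are assumptions). A load of a padding-free
// struct such as
//
//   %s = load {i32, i32}, {i32, i32}* %p
//
// is split into one scalar load per field, reassembled with insertvalue:
//
//   %s.elt = getelementptr inbounds {i32, i32}, {i32, i32}* %p, i32 0, i32 0
//   %s.unpack = load i32, i32* %s.elt
//   %0 = insertvalue {i32, i32} undef, i32 %s.unpack, 0
//   %s.elt1 = getelementptr inbounds {i32, i32}, {i32, i32}* %p, i32 0, i32 1
//   %s.unpack2 = load i32, i32* %s.elt1
//   %s3 = insertvalue {i32, i32} %0, i32 %s.unpack2, 1
//
// Structs with padding are left alone so that knowledge of the padding is
// not lost for the rest of the pipeline.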
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care we could probably handle both volatile and atomic
  // loads here, but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                             Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. (Constant global values and allocas are examples of objects whose
// size can be determined this way.)
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32, i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
                          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (SI.getPointerAddressSpace() != 0)
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return isa<ConstantPointerNull>(Ptr);
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0)
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0))
    return true;
  return false;
}

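// An illustrative sketch of the transform guarded by the helper above
// (hand-written IR, not from an actual test). In address space 0,
//
//   %v = load i32, i32* null
//
// is known to be unreachable, so visitLoadInst below replaces it with
//
//   store i32 undef, i32* null
//   ; uses of %v replaced by undef
//
// keeping the store-to-null as an "unreachable" marker because the CFG
// cannot be modified from within the combiner.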
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these xforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
                                          SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
                                          SI->getOperand(2)->getName()+".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Look for an extractelement/insertvalue sequence that acts like a
/// bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
/// %E0 = extractelement <2 x double> %U, i32 0
/// %V0 = insertvalue [2 x double] undef, double %E0, 0
/// %E1 = extractelement <2 x double> %U, i32 1
/// %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value
/// being stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
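///
/// As an illustrative sketch (names hypothetical), assuming i64 and double
/// share a store size, a store of a bitcast value:
///   %b = bitcast double %v to i64
///   store i64 %b, i64* %p
/// is rewritten to store the original value through a recast pointer:
///   %p.cast = bitcast i64* %p to double*
///   store double %v, double* %p.cast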
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably, with some care, handle both volatile and
  // ordered atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

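/// Unpack a store of a small aggregate into stores of its elements. As an
/// illustrative sketch (names hypothetical), a store of a padding-free struct:
///   store { i32, i32 } %agg, { i32, i32 }* %p
/// becomes one store per element through inbounds GEPs:
///   %agg.elt = extractvalue { i32, i32 } %agg, 0
///   %p.repack = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p,
///               i32 0, i32 0
///   store i32 %agg.elt, i32* %p.repack
///   ...and likewise for element 1.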
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably, with some care, handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break up stores of structs with padding here, as we'd
    // lose the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32, i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where the select is a min/max:
/// select ((cmp load V1, load V2), V1, V2).
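///
/// As an illustrative sketch (names hypothetical), with %min.addr produced by
/// a min/max select over float loads, the integer round-trip:
///   %min.cast = bitcast float* %min.addr to i32*
///   %v = load i32, i32* %min.cast
///   %dst.cast = bitcast float* %dst to i32*
///   store i32 %v, i32* %dst.cast
/// is rewritten to load and store through the original float type:
///   %v2 = load float, float* %min.addr
///   store float %v2, float* %dst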
static Instruction *removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                        StoreInst &SI) {
  // Is the store done through a bitcast of the pointer operand?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return nullptr;
  // Is the stored value an integer load through a bitcast?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return nullptr;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return nullptr;
  if (!isMinMaxWithLoads(LoadAddr))
    return nullptr;

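  // Only proceed if every user of the load is a store to some other location
  // (not back through the min/max address and not a swifterror slot), so that
  // all users can be rewritten below.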
  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return nullptr;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = combineLoadToNewType(
      IC, *LI, LoadAddr->getType()->getPointerElementType());
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  return LI;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to unpack aggregate stores into stores of their elements.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (Instruction *I = removeBitcastsFromLoadStoreOnMinMax(*this, SI)) {
    for (auto *UI : I->users())
      eraseInstFromFunction(*cast<Instruction>(UI));
    eraseInstFromFunction(*I);
    return nullptr;
  }

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the pointer operand is an alloca with a single use, zap the store,
  // making the alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
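  // For example (illustrative IR), the first store below is dead and is
  // removed when the scan reaches the second:
  //   store i32 %a, i32* %p
  //   %b = or i32 %a, 1
  //   store i32 %b, i32* %p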
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen; also skip
    // pointer-to-pointer bitcasts, which are no-ops here.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Is the previous store unordered and to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is
    // loaded from the pointer we're storing to and is the value being stored,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null          -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y)  -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
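/// As an illustrative sketch (block and value names hypothetical), the result
/// in the common successor looks like:
///   Dest:
///     %storemerge = phi i32 [ %v1, %Then ], [ %v2, %Else ]
///     store i32 %storemerge, i32* %P
///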
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");

  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of
    // the destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  // The debug locations of the original instructions might differ; merge them.
  NewSI->applyMergedLocation(SI.getDebugLoc(), OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}