//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer; return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses. If we see a memcpy/memmove that targets an unoffset
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
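///
/// A minimal illustrative example of the pattern this recognizes (the value
/// and global names here are hypothetical, not taken from real IR):
///
///   %buf = alloca [16 x i8]
///   %p = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 0
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p, i8* @const_data, i64 16,
///                                        i32 1, i1 false)
///   ; ... only reads of %buf follow ...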
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 4> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // A GEP with all zero indices does not offset the pointer; any other
        // GEP does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we have already seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the copy (a memcpy/memmove from a
/// constant global) if the specified alloca is only modified by such a copy,
/// and null otherwise. If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for the size of the alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
                                            APInt(64, AllocaSize), DL);
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
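  //
  // For example (illustrative IR; the value names are hypothetical):
  //   %V = alloca i32, i32 4
  // becomes:
  //   %V1 = alloca [4 x i32]
  //   %V1.sub = getelementptr inbounds [4 x i32], [4 x i32]* %V1, i64 0, i64 0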
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible... also skip interleaved debug info
    //
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It points to the first non-allocation instruction in the
    // block, insert our getelementptr instruction...
    //
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
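  //
  // For example, on a target with 64-bit pointers (illustrative IR):
  //   %V = alloca i32, i32 %n
  // becomes:
  //   %0 = zext i32 %n to i64
  //   %V = alloca i32, i64 %0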
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees a bitcast or GEP, it will
// create a new bitcast or GEP with the new pointer and use them in the load
// instruction.
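//
// For example (illustrative), if %old is replaced by @g in addrspace(2):
//   %gep = getelementptr i8, i8* %old, i64 4
//   %v = load i8, i8* %gep
// becomes:
//   %gep1 = getelementptr i8, i8 addrspace(2)* @g, i64 4
//   %v = load i8, i8 addrspace(2)* %gep1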
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero-sized alloca there is no point in doing an array
      // allocation. This is helpful if the array size is a complicated
      // expression not used elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier
        // already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global whose alignment is equal to or exceeds that of
    // the allocation. If this is the case, we can change all users to use the
    // constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        } else {
          PointerReplacer PtrReplacer(*this);
          PtrReplacer.replacePointer(AI, Cast);
          ++NumGlobalCopies;
        }
      }
    }
  }

  // Finally, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy, const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
      IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      copyNonnullMetadata(LI, N, *NewLoad);
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      copyRangeMetadata(IC.getDataLayout(), LI, N, *NewLoad);
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load. This is debatable, and might be reasonable to
/// change later. However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
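///
/// A sketch of the bitcast fold performed below (illustrative IR):
///   %x = load i32, i32* %p
///   %f = bitcast i32 %x to float
/// becomes:
///   %p.cast = bitcast i32* %p to float*
///   %f = load float, float* %p.cast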
581static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
Philip Reames6f4d0082016-05-06 22:17:01 +0000582 // FIXME: We could probably with some care handle both volatile and ordered
583 // atomic loads here but it isn't clear that this is important.
584 if (!LI.isUnordered())
Chandler Carruth2f75fcf2014-10-18 06:36:22 +0000585 return nullptr;
Chris Lattnera65e2f72010-01-05 05:57:49 +0000586
Chandler Carruth2f75fcf2014-10-18 06:36:22 +0000587 if (LI.use_empty())
588 return nullptr;
Chris Lattnera65e2f72010-01-05 05:57:49 +0000589
Arnold Schwaighofer5d335552016-09-10 18:14:57 +0000590 // swifterror values can't be bitcasted.
591 if (LI.getPointerOperand()->isSwiftError())
592 return nullptr;
593
Chandler Carruthcd8522e2015-01-22 05:08:12 +0000594 Type *Ty = LI.getType();
Mehdi Aminia28d91d2015-03-10 02:37:25 +0000595 const DataLayout &DL = IC.getDataLayout();
Chandler Carruthcd8522e2015-01-22 05:08:12 +0000596
597 // Try to canonicalize loads which are only ever stored to operate over
598 // integers instead of any other type. We only do this when the loaded type
599 // is sized and has a size exactly the same as its store size and the store
600 // size is a legal integer type.
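  //
  // For example (illustrative): a float that is only loaded and then stored
  // can instead be loaded and stored as an i32 of the same width:
  //   %f = load float, float* %p       -->   %i = load i32, i32* %p.cast
  //   store float %f, float* %q        -->   store i32 %i, i32* %q.cast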
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer
  // types, as long as those are noops (i.e., the source or dest type have the
  // same bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}

static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
                                            UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads of structs with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
                                            UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                             Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas are the kinds of objects whose
// size this can prove).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
                          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0)
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0))
    return true;
  return false;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
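  //
  // For example (illustrative):
  //   store i32 %x, i32* %p
  //   %y = add i32 %z, 1      ; no intervening clobber of %p
  //   %v = load i32, i32* %p  ; %v is replaced by %x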
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these
  // transforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
                                          SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
                                          SI->getOperand(2)->getName()+".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Look for an extractelement/insertvalue sequence that acts like a
/// bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and,
/// where we can, we should match the type of a store to the type of value
/// being stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores, as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.eraseInstFromFunction returns a null pointer.
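///
/// For example (an illustrative sketch; the value names are hypothetical):
///
///   %f.cast = bitcast float %f to i32
///   store i32 %f.cast, i32* %p
///
/// is rewritten to store the float directly, casting the pointer instead:
///
///   %p.cast = bitcast i32* %p to float*
///   store float %f, float* %p.cast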
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care we could probably handle both volatile and ordered
  // atomic stores here, but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

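/// \brief Unpack a store of an aggregate into stores of its individual
/// elements.
///
/// A minimal sketch of the struct case (hypothetical IR; see the code below
/// for the exact naming scheme):
///
///   store { i32, i32 } %agg, { i32, i32 }* %p
///
/// becomes something like:
///
///   %agg.elt = extractvalue { i32, i32 } %agg, 0
///   %p.repack = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0
///   store i32 %agg.elt, i32* %p.repack
///   %agg.elt1 = extractvalue { i32, i32 } %agg, 1
///   %p.repack1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 1
///   store i32 %agg.elt1, i32* %p.repack1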
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care we could probably handle both volatile and atomic
  // stores here, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break up stores with padding here, as we'd lose the
    // knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
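    // Store each extracted element through its own in-bounds GEP. The
    // per-element alignment is derived from the store's alignment and the
    // element's offset in the struct layout, and the original store's AA
    // metadata is carried over to each new store.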
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily and may need tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

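    // As in the struct case, store each element through its own GEP; here the
    // per-element alignment is derived from a running byte offset, since all
    // array elements have the same allocation size.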
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
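  // If a stronger alignment can be computed (or enforced on the underlying
  // object), record it on the store; if the store had no explicit alignment,
  // make the effective ABI alignment explicit.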
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Unpack aggregate stores into stores of the individual elements.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
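  // For example (illustrative IR; the names are hypothetical):
  //   store i32 %a, i32* %p
  //   %b = or i32 %a, 1
  //   store i32 %b, i32* %p    ; makes the first store dead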
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Is the previous store unordered and to the same location?
      if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is the
    // value being stored and it was loaded from the pointer we are storing to,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr; // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
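/// For the first case, the result is roughly (hypothetical IR; the phi is
/// named "storemerge" by the code below):
///
///   merge:
///     %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///     store i32 %storemerge, i32* %P
///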
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");

  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  // The debug locations of the original instructions might differ; merge them.
  NewSI->setDebugLoc(DILocation::getMergedLocation(SI.getDebugLoc(),
                                                   OtherStore->getDebugLoc()));

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}