//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
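///
/// For example (an illustrative sketch, with @g standing for some constant
/// global):
///   getelementptr (i8, i8* bitcast ([4 x i32]* @g to i8*), i64 4)
/// is accepted because it is a constant expression rooted at @g, whereas the
/// same address computed by GEP/bitcast *instructions* would be rejected here
/// (instructions are instead walked by the callers below).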
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses. If we see pointer arithmetic,
/// keep track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses. If we see a memcpy/memmove whose destination is a
/// non-offset pointer to the alloca, and whose source points to a constant
/// global, we can optimize this.
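///
/// A minimal sketch of the pattern this recognizes (names are illustrative):
///   %buf = alloca [16 x i8]
///   %p   = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 0
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p, i8* @const_data, i64 16,
///                                        i1 false)
///   ; ... only reads of %buf after this point ...
/// Here TheCopy would be set to the memcpy, allowing the caller to forward
/// all uses of %buf to @const_data.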
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer.
        // Otherwise it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (Call->onlyReadsMemory() &&
            (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the sole memcpy/memmove that
/// modifies the specified alloca with a copy from a constant global, or null
/// otherwise. If we can prove this, we can replace any uses of the alloca
/// with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for the size of the alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
                                            APInt(64, AllocaSize), DL);
}

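// A sketch of the array-size canonicalization done below (illustrative IR):
//   %a = alloca i32, i32 4
// becomes
//   %a1 = alloca [4 x i32]
//   %a1.sub = getelementptr inbounds [4 x i32], [4 x i32]* %a1, i64 0, i64 0
// and all uses of %a are replaced with %a1.sub; a scalar alloca whose array
// size operand is not already an i32 gets that operand canonicalized instead.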
static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block
      // of allocas if possible; also skip interleaved debug info.
      //
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP =
          GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
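//
// For example (a sketch; the address-space numbers are illustrative): when an
// addrspace(4) constant global replaces an addrspace(0) alloca,
//   %p = bitcast i8* %alloca to i32*
//   %v = load i32, i32* %p
// is rebuilt along the use chain as
//   %p2 = bitcast i8 addrspace(4)* @g to i32 addrspace(4)*
//   %v  = load i32, i32 addrspace(4)* %p2
// instead of introducing an addrspacecast of @g.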
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

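/// Visit an alloca: a brief summary of the cases handled below. We fold
/// constant array sizes, assign the preferred alignment when none is given,
/// merge zero-size allocas into the entry block, and replace an alloca that
/// is only ever initialized by a copy from a constant global with the global
/// itself (using PointerReplacer when the address spaces differ).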
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        } else {
          PointerReplacer PtrReplacer(*this);
          PtrReplacer.replacePointer(AI, Cast);
          ++NumGlobalCopies;
        }
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}

/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
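///
/// A sketch of the effect (illustrative IR): rewriting a load at type i64 to
/// type double turns
///   %v = load i64, i64* %p
/// into
///   %p.cast = bitcast i64* %p to double*
///   %v      = load double, double* %p.cast
/// with the applicable metadata copied over to the new load.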
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
                                      const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType()->getPointerElementType() == NewTy &&
        NewPtr->getType()->getPointerAddressSpace() == AS))
    NewPtr = IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

  LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
      NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      copyNonnullMetadata(LI, N, *NewLoad);
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      copyRangeMetadata(IC.getDataLayout(), LI, N, *NewLoad);
      break;
    }
  }
  return NewLoad;
}

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
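///
/// A sketch of the effect (illustrative IR): given `store i64 %x, i64* %p`
/// and a replacement value `double %d` of the same width, this emits
///   %p.cast = bitcast i64* %p to double*
///   store double %d, double* %p.cast
/// carrying over only the metadata kinds that remain valid for stores.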
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// Returns true if the instruction represents a minmax pattern like:
/// select ((cmp load V1, load V2), V1, V2).
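///
/// Concretely, the pattern looks like this (an illustrative sketch):
///   %l1 = load i32, i32* %p1
///   %l2 = load i32, i32* %p2
///   %c  = icmp slt i32 %l1, %l2
///   %m  = select i1 %c, i32* %p1, i32* %p2
/// where the select picks between the two *pointers* whose loaded values were
/// compared.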
static bool isMinMaxWithLoads(Value *V) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}

/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
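///
/// A sketch of the main rewrite (illustrative IR): when the only use of a
/// load is a no-op cast, e.g.
///   %x = load i64, i64* %p
///   %f = bitcast i64 %x to double
/// the load is reissued at the casted type instead:
///   %p.cast = bitcast i64* %p to double*
///   %f      = load double, double* %p.cast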
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably handle both volatile and ordered atomic loads
  // here with some care, but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if minmax pattern is found (to avoid
  // infinite loop).
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true))) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto* CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

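// A sketch of the unpacking done below (illustrative IR): a single load of a
// struct such as
//   %sv = load { i32, i32 }, { i32, i32 }* %p
// is split into per-element loads reassembled with insertvalue, roughly:
//   %sv.elt     = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0
//   %sv.unpack  = load i32, i32* %sv.elt
//   ; ... likewise for the remaining elements ...
//   %sv         = insertvalue { i32, i32 } %partial, i32 %sv.unpack1, 1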
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably handle both volatile and atomic loads here with
  // some care, but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily; it may need tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                             Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. Constant global values and allocas are examples of objects whose
// size can be determined this way.
//
// FIXME: This should probably live in ValueTracking (or similar).
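//
// For example (illustrative): `%a = alloca [4 x i32]` has a known object size
// of 16 bytes on typical targets, so a query with MaxSize >= 16 succeeds,
// while a pointer of unknown provenance (e.g. a plain function argument)
// makes the walk below return false.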
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as an
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// also search through non-zero constant indices if we kept track of the
// offsets those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
                          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these transforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
                                          SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
                                          SI->getOperand(2)->getName()+".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

Adrian Prantl5f8f34e42018-05-01 15:54:18 +00001100/// Look for extractelement/insertvalue sequence that acts like a bitcast.
Arch D. Robisonbe0490a2016-04-25 22:22:39 +00001101///
1102/// \returns underlying value that was "cast", or nullptr otherwise.
1103///
1104/// For example, if we have:
1105///
1106/// %E0 = extractelement <2 x double> %U, i32 0
1107/// %V0 = insertvalue [2 x double] undef, double %E0, 0
1108/// %E1 = extractelement <2 x double> %U, i32 1
1109/// %V1 = insertvalue [2 x double] %V0, double %E1, 1
1110///
1111/// and the layout of a <2 x double> is isomorphic to a [2 x double],
1112/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
1113/// Note that %U may contain non-undef values where %V1 has undef.
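/// By contrast, a sequence where element i is not taken from lane i of a
/// single source vector is rejected (an illustrative non-match):
///
/// %E0 = extractelement <2 x double> %U, i32 1
/// %V0 = insertvalue [2 x double] undef, double %E0, 0
///
/// Here the lane index (1) disagrees with the insert index (0), so the walk
/// below returns nullptr.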
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT))
    return nullptr;
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
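///
/// For example (an illustrative sketch; value names are invented and the
/// exact cast naming is up to combineStoreToNewValue):
///
///   %b = bitcast float %f to i32
///   store i32 %b, i32* %p
///
/// is rewritten to store the original float through a cast pointer:
///
///   %0 = bitcast i32* %p to float*
///   store float %f, float* %0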
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care we could probably handle both volatile and ordered
  // atomic stores here, but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

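/// Split up a store of an aggregate value into stores of its elements.
///
/// A minimal sketch of the effect (names invented; the real code below also
/// propagates alignment and AA metadata):
///
///   store { i32, i32 } %agg, { i32, i32 }* %p
///
/// becomes per-element stores through inbounds GEPs:
///
///   %agg.elt = extractvalue { i32, i32 } %agg, 0
///   store i32 %agg.elt, i32* %p.repack
///   %agg.elt1 = extractvalue { i32, i32 } %agg, 1
///   store i32 %agg.elt1, i32* %p.repack2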
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care we could probably handle both volatile and atomic
  // stores here, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack it.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break stores with padding here, as we'd lose the
    // knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array has only one element, we unpack it.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily and may need a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
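///
/// A sketch of the shape this matches (types and names are illustrative):
///
///   %lv1 = load float, float* %p1
///   %lv2 = load float, float* %p2
///   %cmp = fcmp olt float %lv1, %lv2
///   %sel = select i1 %cmp, float* %p1, float* %p2
///   %sel.i = bitcast float* %sel to i32*
///   %int = load i32, i32* %sel.i
///   %dst.i = bitcast float* %dst to i32*
///   store i32 %int, i32* %dst.i
///
/// which is roughly rewritten to operate on the original element type:
///
///   %f = load float, float* %sel
///   store float %f, float* %dst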
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                StoreInst &SI) {
  // Is the store's pointer operand a bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // Is the stored value an integer loaded through a bitcast?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  if (!isMinMaxWithLoads(LoadAddr))
    return false;

  // Require that every user of the load is a store, that none of those stores
  // write back to the selected address, and that none go through a swifterror
  // pointer.
  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = combineLoadToNewType(
      IC, *LI, LoadAddr->getType()->getPointerElementType());
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);
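  // E.g. (a sketch): a store with no explicit alignment picks up the stored
  // type's ABI alignment, and a pointer proven 16-byte aligned upgrades the
  // store to align 16.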

  // Try to split an aggregate store into stores of its elements.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
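  // E.g. (illustrative):
  //   %a = alloca i32
  //   store i32 %x, i32* %a   ; %a has no other uses, so the store is dead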
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
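  // E.g. (an invented bitfield-style sequence; only the first store is dead):
  //   store i32 %old, i32* %p
  //   %v = or i32 %x, 1
  //   store i32 %v, i32* %p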
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile or ordered, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is the
    // value we are storing and it was loaded from the pointer we are storing
    // to, then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null          -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y)  -> turns into 'unreachable' in SimplifyCFG
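  // E.g. (a sketch): for "store i32 %x, i32* null" we only replace the stored
  // value with undef here, dropping the use of %x; a later SimplifyCFG run
  // turns the store to null into 'unreachable'.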
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the second-to-last instruction in the basic block
  // (excluding debug info and bitcasts of pointers) and if the block ends with
  // an unconditional branch, try to move the store to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));

  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      mergeStoreIntoSuccessor(SI);

  return nullptr;
}

/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
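///
/// A sketch in IR (block and value names invented; "storemerge" is the name
/// the code below gives the new phi):
///
///   then:
///     store i32 %v1, i32* %p
///     br label %join
///   else:
///     store i32 %v2, i32* %p
///     br label %join
///
/// becomes a phi plus a single store in the successor:
///
///   join:
///     %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///     store i32 %storemerge, i32* %p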
bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "This code has not been audited for volatile or ordered store case.");

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if the relevant blocks aren't all distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(), SI.getAlignment(),
                                   SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}