//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");

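/// Compute the byte offset implied by GEP indices [Idx, NumOperands).  If any
/// index in that range is not a constant, VariableIdxFound is set and the
/// returned offset is meaningless.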
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
/// return that constant offset.  For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40].  In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that, each may add some constant
  // offset, which determines their offset from each other.  We handle no other
  // case at this point.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge or 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size.  If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSize();
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume that the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

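  // For example, if MaxIntSize is 8, a 17 byte range is assumed to lower to
  // 2 pointer-sized stores plus 1 byte store, so merging only pays off when
  // it replaces more than 3 individual stores.
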
  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {
class MemsetRanges {
  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;
  typedef SmallVectorImpl<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;
public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef SmallVectorImpl<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);

};

} // end anon namespace

/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

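  // Binary search for the first existing range whose end is at or past Start;
  // earlier ranges end before Start and so cannot overlap or abut the new
  // [Start, End) interval.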
  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
    [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceResults *MD;
    TargetLibraryInfo *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = nullptr;
      TLI = nullptr;
    }

    bool runOnFunction(Function &F) override;

  private:
    // This transformation requires dominator info.
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<MemoryDependenceWrapperPass>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.addPreserved<MemoryDependenceWrapperPass>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
    bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
    bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

/// The public interface to this file.
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away.  In particular, this looks for stores to neighboring locations of
/// memory.  If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a splattable value.  Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (Instruction *SI : Range.TheStores)
            dbgs() << *SI << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());

  return std::min(StoreAlign, LoadAlign);
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its arguments, plus anything else that may
// alias with them.
// The method returns true if it was successful.
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P) {
  // If P may modify or reference the store's location, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (AA.getModRefInfo(P, StoreLoc) != MRI_NoModRef)
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs;
  MemLocs.push_back(StoreLoc);

  // Lifted callsites.
  SmallVector<ImmutableCallSite, 8> CallSites;

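  // Walk backwards from just above SI toward P, deciding for each instruction
  // whether it has to be lifted along with the store.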
524 for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
525 auto *C = &*I;
526
527 bool MayAlias = AA.getModRefInfo(C) != MRI_NoModRef;
528
529 bool NeedLift = false;
530 if (Args.erase(C))
531 NeedLift = true;
532 else if (MayAlias) {
533 NeedLift = std::any_of(MemLocs.begin(), MemLocs.end(),
534 [C, &AA](const MemoryLocation &ML) {
535 return AA.getModRefInfo(C, ML);
536 });
537
538 if (!NeedLift)
539 NeedLift = std::any_of(CallSites.begin(), CallSites.end(),
540 [C, &AA](const ImmutableCallSite &CS) {
541 return AA.getModRefInfo(C, CS);
542 });
543 }
544
545 if (!NeedLift)
546 continue;
547
548 if (MayAlias) {
549 if (auto CS = ImmutableCallSite(C)) {
550 // If we can't lift this before P, it's game over.
551 if (AA.getModRefInfo(P, CS) != MRI_NoModRef)
552 return false;
553
554 CallSites.push_back(CS);
555 } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
556 // If we can't lift this before P, it's game over.
557 auto ML = MemoryLocation::get(C);
558 if (AA.getModRefInfo(P, ML) != MRI_NoModRef)
559 return false;
560
561 MemLocs.push_back(ML);
562 } else
563 // We don't know how to lift this instruction.
564 return false;
565 }
566
567 ToLift.push_back(C);
568 for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
569 if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
570 if (A->getParent() == SI->getParent())
571 Args.insert(A);
572 }
573
574 // We made it, we need to lift
575 for (auto *I : reverse(ToLift)) {
576 DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
577 I->moveBefore(P);
578 }
579
580 return true;
581}
582
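/// Perform store-related transformations: forward a load/store pair to a
/// memcpy/memmove, feed a load/store pair into call slot optimization, and
/// merge byte-splattable stores into memsets.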
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach the pass how to propagate the !nontemporal
  // metadata to memset calls.  However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store.  If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (AA.getModRefInfo(&I, LoadLoc) & MRI_Mod) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics.  If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          unsigned Align = findCommonAlignment(DL, SI, LI);
          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(SI->getPointerOperand(),
                                      LI->getPointerOperand(), Size,
                                      Align, SI->isVolatile());
          else
            M = Builder.CreateMemCpy(SI->getPointerOperand(),
                                     LI->getPointerOperand(), Size,
                                     Align, SI->isVolatile());

          DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI
                       << " => " << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            findCommonAlignment(DL, SI, LI), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'able a
  // byte at a time like "0" or "-1" or any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging, as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      unsigned Align = SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal,
                                     Size, Align, SI->isVolatile());

      DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

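/// See if this memset can be widened by merging it with neighboring stores or
/// memsets of the same byte value.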
bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}


/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
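  // Worklist walk over the alloca's users: look through bitcasts,
  // addrspacecasts, and all-zero GEPs; ignore lifetime markers; anything else
  // must be the call or the memcpy itself.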
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != MRI_NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != MRI_NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group};
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

Sanjay Patela75c41e2015-08-13 22:53:20 +0000949/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
950/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
Ahmed Bougacha15a31f62015-05-16 01:23:47 +0000951bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep) {
Chris Lattner7e9b2ea2010-11-18 07:02:37 +0000952 // We can only transforms memcpy's where the dest of one is the source of the
953 // other.
Chris Lattner58f9f582010-11-21 00:28:59 +0000954 if (M->getSource() != MDep->getDest() || MDep->isVolatile())
Chris Lattner7e9b2ea2010-11-18 07:02:37 +0000955 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +0000956
Chris Lattnerfd51c522010-12-09 07:39:50 +0000957 // If dep instruction is reading from our current input, then it is a noop
958 // transfer and substituting the input won't change this instruction. Just
959 // ignore the input and let someone else zap MDep. This handles cases like:
960 // memcpy(a <- a)
961 // memcpy(b <- a)
962 if (M->getSource() == MDep->getSource())
963 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +0000964
Chris Lattner0ab5e2c2011-04-15 05:18:47 +0000965 // Second, the length of the memcpy's must be the same, or the preceding one
Chris Lattner7e9b2ea2010-11-18 07:02:37 +0000966 // must be larger than the following one.
Dan Gohman19e30d52011-01-21 22:07:57 +0000967 ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
968 ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
969 if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
970 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +0000971
Chandler Carruth7b560d42015-09-09 17:55:00 +0000972 AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
Chris Lattner59572292010-11-21 08:06:10 +0000973
974 // Verify that the copied-from memory doesn't change in between the two
975 // transfers. For example, in:
976 // memcpy(a <- b)
977 // *b = 42;
978 // memcpy(c <- a)
979 // It would be invalid to transform the second memcpy into memcpy(c <- b).
980 //
981 // TODO: If the code between M and MDep is transparent to the destination "c",
982 // then we could still perform the xform by moving M up to the first memcpy.
983 //
984 // NOTE: This is conservative, it will stop on any read from the source loc,
985 // not just the defining memcpy.
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +0000986 MemDepResult SourceDep =
987 MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
988 M->getIterator(), M->getParent());
Chris Lattner59572292010-11-21 08:06:10 +0000989 if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
990 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +0000991
Chris Lattner731caac2010-11-18 08:00:57 +0000992 // If the dest of the second might alias the source of the first, then the
993 // source and dest might overlap. We still want to eliminate the intermediate
994 // value, but we have to generate a memmove instead of memcpy.
Chris Lattner6cf8d6c2010-12-26 22:57:41 +0000995 bool UseMemMove = false;
Chandler Carruth70c61c12015-06-04 02:03:15 +0000996 if (!AA.isNoAlias(MemoryLocation::getForDest(M),
997 MemoryLocation::getForSource(MDep)))
Chris Lattner6cf8d6c2010-12-26 22:57:41 +0000998 UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}
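
// A sketch of the net effect at the C level (names and sizes are made up):
// \code
//   memcpy(tmp, src, 64);   // MDep
//   memcpy(dst, tmp, 32);   // M: reads only bytes that MDep wrote
// \endcode
// becomes, provided src is not modified between the two calls:
// \code
//   memcpy(tmp, src, 64);
//   memcpy(dst, src, 32);   // tmp may now be dead, letting DSE remove MDep
// \endcode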

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
/// memset(dst, c, dst_size);
/// memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
/// memcpy(dst, src, src_size);
/// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOpt::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                              MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned and SrcSize is constant, the new memset writes to
  // Dest + SrcSize, so use the known alignment of that sum.
  const unsigned DestAlign =
      std::max(MemSet->getAlignment(), MemCpy->getAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
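
  // For example (hypothetical values): with DestAlign == 16 and a constant
  // SrcSize of 8, the memset destination Dest + SrcSize is only guaranteed
  // MinAlign(8, 16) == 8-byte alignment, so Align becomes 8.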

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  // Memset the number of trailing bytes not covered by the memcpy: zero if
  // the copy is at least as large as the memset, DestSize - SrcSize otherwise.
  Value *MemsetLen =
      Builder.CreateSelect(Builder.CreateICmpULE(DestSize, SrcSize),
                           ConstantInt::getNullValue(DestSize->getType()),
                           Builder.CreateSub(DestSize, SrcSize));
  Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
                       MemsetLen, Align);
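
  // Worked example with made-up constants: DestSize == 16 and SrcSize == 8
  // give MemsetLen == 8, i.e. memset(Dest + 8, c, 8). If DestSize <= SrcSize,
  // the select produces 0 and the new memset writes nothing.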

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
/// memset(dst1, c, dst1_size);
/// memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
/// memset(dst1, c, dst1_size);
/// memset(dst2, c, dst2_size);
/// \endcode
/// when dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOpt::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                           MemSetInst *MemSet) {
  // This only makes sense on memcpy(..., memset(...), ...).
  if (MemSet->getRawDest() != MemCpy->getRawSource())
    return false;

  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
    return false;

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getAlignment());
  return true;
}
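
// A concrete instance (illustrative sizes): a 16-byte memset feeding an
// 8-byte memcpy,
// \code
//   memset(dst1, 0, 16);
//   memcpy(dst2, dst1, 8);
// \endcode
// reads only memset-written bytes, so the copy is emitted as
// memset(dst2, 0, 8).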

/// Perform simplification of memcpys. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpys.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }
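
  // As an illustration (hypothetical IR), a copy from a constant all-zero
  // global:
  //   @g = constant [16 x i8] zeroinitializer
  //   memcpy(%p <- @g)   ; 16 bytes
  // is rewritten to a memset of %p with byte 0, since isBytewiseValue
  // recognizes the repeated-byte initializer.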

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset. We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;

    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }
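
    // Example of the isDef case (names invented):
    //   %a = alloca [32 x i8]
    //   call @llvm.lifetime.start(i64 32, %a)
    //   memcpy(%dst <- %a)   ; copies at most 32 bytes
    // The source bytes are undefined, so the memcpy is deleted below.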

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // The pointers don't alias, so we know we can transform this memmove into
  // a memcpy.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));
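
  // Illustrative before/after (operands proven not to alias above):
  //   call void @llvm.memmove(%dst <- %src, n)
  // becomes
  //   call void @llvm.memcpy(%dst <- %src, n)
  // Only the callee is swapped; the operands are left untouched.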

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, ByValSize), true,
      CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy's alignment, then we check to see if we
  // can force the source of the memcpy to the alignment we need. If we fail,
  // we bail out.
  AssumptionCache &AC =
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
          *CS->getParent()->getParent());
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the call to foo(*a) into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CS.getInstruction()->getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good! Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}
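
// A sketch of the rewrite (names are made up): given
// \code
//   memcpy(%tmp <- %src)            ; covers at least the byval size
//   call void @foo(i8* byval %tmp)
// \endcode
// the call is changed to pass the copy's source directly:
// \code
//   call void @foo(i8* byval %src)
// \endcode
// which often leaves %tmp and the memcpy dead for later passes to clean up.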

/// Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOpt::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here. These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}
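
// A minimal end-to-end illustration, assuming the pass is run through the
// legacy pass manager under its registered name "memcpyopt":
// \code
//   opt -memcpyopt -S input.ll -o output.ll
// \endcode
// e.g. a memmove whose operands provably don't alias in input.ll comes out
// as the corresponding @llvm.memcpy call in output.ll.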