//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");

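/// Compute the byte offset implied by GEP operands [Idx, NumOperands), which
/// must all be constant integers; if a variable index is encountered,
/// VariableIdxFound is set and the returned offset is meaningless.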
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
/// return that constant offset. For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that they handle some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anon namespace

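/// Heuristically decide whether lowering this range of stores to a single
/// memset call is likely to be a win.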
bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found more than 4 stores to merge or 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {
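/// A collection of MemsetRange entries, kept sorted by start offset, used to
/// accumulate neighboring stores/memsets and decide which spans are worth
/// lowering to a single memset.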
class MemsetRanges {
  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;
  typedef SmallVectorImpl<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;
public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef SmallVectorImpl<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);

};

} // end anon namespace

/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
      [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOptLegacyPass : public FunctionPass {
    MemCpyOptPass Impl;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOptLegacyPass() : FunctionPass(ID) {
      initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

  private:
    // This transformation requires dominator postdominator info
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<MemoryDependenceWrapperPass>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.addPreserved<MemoryDependenceWrapperPass>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
    bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
    bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOptLegacyPass::ID = 0;
}

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store that can be splatable.  Scan to find
  // all subsequent stores of the same value to offset from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that each memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
          cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
        Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (Instruction *SI : Range.TheStores)
            dbgs() << *SI << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

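/// Return the smaller of the store and load alignments, falling back to the
/// ABI type alignment for whichever instruction has no explicit alignment.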
static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());

  return std::min(StoreAlign, LoadAlign);
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its arguments, plus anything that
// may alias with them.
// The method returns true if it was successful.
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (AA.getModRefInfo(P, StoreLoc) != MRI_NoModRef)
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs;
  MemLocs.push_back(StoreLoc);

  // Lifted callsites.
  SmallVector<ImmutableCallSite, 8> CallSites;

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = AA.getModRefInfo(C) != MRI_NoModRef;

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
        return AA.getModRefInfo(C, ML);
      });

      if (!NeedLift)
        NeedLift = any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
          return AA.getModRefInfo(C, CS);
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      if (auto CS = ImmutableCallSite(C)) {
        // If we can't lift this before P, it's game over.
        if (AA.getModRefInfo(P, CS) != MRI_NoModRef)
          return false;

        CallSites.push_back(CS);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (AA.getModRefInfo(P, ML) != MRI_NoModRef)
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
        if (A->getParent() == SI->getParent())
          Args.insert(A);
  }

  // We made it; lift everything we collected, in order.
  for (auto *I : reverse(ToLift)) {
    DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

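/// Try to optimize a simple store: promote an aggregate load/store pair to a
/// memcpy or memmove, forward a call's result directly into the store's
/// destination (call slot optimization), or merge byte-splattable stores with
/// their neighbors into a memset.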
bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (AA.getModRefInfo(&I, LoadLoc) & MRI_Mod) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          unsigned Align = findCommonAlignment(DL, SI, LI);
          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(SI->getPointerOperand(),
                                      LI->getPointerOperand(), Size,
                                      Align, SI->isVolatile());
          else
            M = Builder.CreateMemCpy(SI->getPointerOperand(),
                                     LI->getPointerOperand(), Size,
                                     Align, SI->isVolatile());

          DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI
                       << " => " << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
        bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
          // The store to dest may never happen if an exception can be thrown
          // between the load and the store.
          if (I->mayThrow() && !CpyDestIsLocal) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            findCommonAlignment(DL, SI, LI), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      unsigned Align = SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal,
                                     Size, Align, SI->isVolatile());

      DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

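/// Try to widen a memset by merging it with neighboring stores or memsets
/// into a single, larger memset.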
bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         unsigned cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // The store to dest may never happen if the call can throw.
    if (C->mayThrow())
      return false;

    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = LookupDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = LookupAliasAnalysis();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != MRI_NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != MRI_NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group};
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'.  Try to simplify M to copy from MDep's input if we can.
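///
/// For example, given:
/// \code
///   memcpy(T <- S)
///   ...
///   memcpy(D <- T)
/// \endcode
/// we try to rewrite the second copy as memcpy(D <- S) (or as a memmove when
/// D and S may overlap), so that the intermediate buffer T can later be
/// eliminated.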
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = LookupAliasAnalysis();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001030/// We've found that the (upward scanning) memory dependence of \p MemCpy is
1031/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
1032/// weren't copied over by \p MemCpy.
1033///
1034/// In other words, transform:
1035/// \code
1036/// memset(dst, c, dst_size);
1037/// memcpy(dst, src, src_size);
1038/// \endcode
1039/// into:
1040/// \code
1041/// memcpy(dst, src, src_size);
1042/// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
1043/// \endcode
Sean Silva6347df02016-06-14 02:44:55 +00001044bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
1045 MemSetInst *MemSet) {
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001046 // We can only transform memset/memcpy with the same destination.
1047 if (MemSet->getDest() != MemCpy->getDest())
1048 return false;
1049
Ahmed Bougacha97876fa2015-05-21 01:43:39 +00001050 // Check that there are no other dependencies on the memset destination.
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +00001051 MemDepResult DstDepInfo =
1052 MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
1053 MemCpy->getIterator(), MemCpy->getParent());
Ahmed Bougacha97876fa2015-05-21 01:43:39 +00001054 if (DstDepInfo.getInst() != MemSet)
1055 return false;
1056
Ahmed Bougacha9692e302015-04-21 21:28:33 +00001057 // Use the same i8* dest as the memcpy, killing the memset dest if different.
1058 Value *Dest = MemCpy->getRawDest();
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001059 Value *DestSize = MemSet->getLength();
1060 Value *SrcSize = MemCpy->getLength();
1061
1062 // By default, create an unaligned memset.
1063 unsigned Align = 1;
1064 // If Dest is aligned and SrcSize is constant, the trailing memset starts at
1065 // Dest + SrcSize; use the alignment that address is known to have.
1066 const unsigned DestAlign =
Pete Cooper67cf9a72015-11-19 05:56:52 +00001067 std::max(MemSet->getAlignment(), MemCpy->getAlignment());
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001068 if (DestAlign > 1)
1069 if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
1070 Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
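  // For example (illustrative numbers only): with a 16-byte-aligned Dest and a
  // constant SrcSize of 24, the trailing memset starts at an 8-byte-aligned
  // address, so Align becomes MinAlign(24, 16) == 8.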
1071
Ahmed Bougacha97876fa2015-05-21 01:43:39 +00001072 IRBuilder<> Builder(MemCpy);
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001073
Ahmed Bougacha05b72c12015-04-18 23:06:04 +00001074 // If the sizes have different types, zext the smaller one.
Ahmed Bougacha7216ccc2015-04-18 17:57:41 +00001075 if (DestSize->getType() != SrcSize->getType()) {
Ahmed Bougacha05b72c12015-04-18 23:06:04 +00001076 if (DestSize->getType()->getIntegerBitWidth() >
1077 SrcSize->getType()->getIntegerBitWidth())
1078 SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
1079 else
1080 DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
Ahmed Bougacha7216ccc2015-04-18 17:57:41 +00001081 }
1082
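  // Compute how many trailing bytes the memset must still initialize: zero if
  // the memcpy covers the whole memset region (DestSize <= SrcSize), otherwise
  // DestSize - SrcSize. The select keeps the subtraction result from being
  // used when it would wrap.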
Benjamin Kramer1697d392016-11-07 17:47:28 +00001083 Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
1084 Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
1085 Value *MemsetLen = Builder.CreateSelect(
1086 Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001087 Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
1088 MemsetLen, Align);
1089
1090 MD->removeInstruction(MemSet);
1091 MemSet->eraseFromParent();
1092 return true;
1093}
Chris Lattner7e9b2ea2010-11-18 07:02:37 +00001094
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001095/// Transform memcpy to memset when its source was just memset.
1096/// In other words, turn:
1097/// \code
1098/// memset(dst1, c, dst1_size);
1099/// memcpy(dst2, dst1, dst2_size);
1100/// \endcode
1101/// into:
1102/// \code
1103/// memset(dst1, c, dst1_size);
1104/// memset(dst2, c, dst2_size);
1105/// \endcode
1106/// When dst2_size <= dst1_size.
1107///
1108/// The \p MemCpy must have a Constant length.
Sean Silva6347df02016-06-14 02:44:55 +00001109bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
1110 MemSetInst *MemSet) {
Tim Shena3dbead2016-08-25 19:27:26 +00001111 AliasAnalysis &AA = LookupAliasAnalysis();
1112
Tim Shen3ad8b432016-08-25 21:03:46 +00001113 // Make sure this is memcpy(..., memset(...), ...), i.e. the memcpy reads from
1114 // the exact address the memset wrote. Otherwise it is hard to reason about.
Tim Shena3dbead2016-08-25 19:27:26 +00001115 if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001116 return false;
1117
1118 ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
1119 ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
1120 // Make sure the memcpy doesn't read any more than what the memset wrote.
1121 // Don't worry about sizes larger than i64.
1122 if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
1123 return false;
1124
Ahmed Bougacha0541c672015-05-21 00:08:35 +00001125 IRBuilder<> Builder(MemCpy);
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001126 Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
Pete Cooper67cf9a72015-11-19 05:56:52 +00001127 CopySize, MemCpy->getAlignment());
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001128 return true;
1129}
1130
Sanjay Patela75c41e2015-08-13 22:53:20 +00001131/// Perform simplification of memcpy's. If we have memcpy A
Gabor Greif62f0aac2010-07-28 22:50:26 +00001132/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
1133/// B to be a memcpy from X to Z (or potentially a memmove, depending on
1134/// circumstances). This allows later passes to remove the first memcpy
1135/// altogether.
Sean Silva6347df02016-06-14 02:44:55 +00001136bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
Nick Lewycky00703e72014-02-04 00:18:54 +00001137 // We can only optimize non-volatile memcpy's.
1138 if (M->isVolatile()) return false;
Owen Anderson18e4fed2010-10-15 22:52:12 +00001139
Chris Lattnerbc4457e2010-12-09 07:45:45 +00001140 // If the source and destination of the memcpy are the same, then zap it.
1141 if (M->getSource() == M->getDest()) {
1142 MD->removeInstruction(M);
1143 M->eraseFromParent();
1144 return false;
1145 }
Benjamin Kramerea9152e2010-12-24 21:17:12 +00001146
1147 // If copying from a constant, try to turn the memcpy into a memset.
Benjamin Kramerb90b2f02010-12-24 22:23:59 +00001148 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
Benjamin Kramer30342fb2010-12-26 15:23:45 +00001149 if (GV->isConstant() && GV->hasDefinitiveInitializer())
Benjamin Kramerb90b2f02010-12-24 22:23:59 +00001150 if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
Chris Lattner6cf8d6c2010-12-26 22:57:41 +00001151 IRBuilder<> Builder(M);
Nick Lewycky00703e72014-02-04 00:18:54 +00001152 Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
Pete Cooper67cf9a72015-11-19 05:56:52 +00001153 M->getAlignment(), false);
Benjamin Kramerb90b2f02010-12-24 22:23:59 +00001154 MD->removeInstruction(M);
1155 M->eraseFromParent();
1156 ++NumCpyToSet;
1157 return true;
1158 }
Benjamin Kramerea9152e2010-12-24 21:17:12 +00001159
Ahmed Bougachab6169662015-05-11 23:09:46 +00001160 MemDepResult DepInfo = MD->getDependency(M);
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001161
1162 // Try to turn a partially redundant memset + memcpy into
1163 // memcpy + smaller memset. We don't need the memcpy size for this.
Ahmed Bougachab6169662015-05-11 23:09:46 +00001164 if (DepInfo.isClobber())
1165 if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001166 if (processMemSetMemCpyDependence(M, MDep))
1167 return true;
1168
Nick Lewycky00703e72014-02-04 00:18:54 +00001169 // The optimizations after this point require the memcpy size.
1170 ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
Craig Topperf40110f2014-04-25 05:29:35 +00001171 if (!CopySize) return false;
Nick Lewycky00703e72014-02-04 00:18:54 +00001172
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001173 // There are four possible optimizations we can do for memcpy:
Chris Lattnerb5557a72009-09-01 17:09:55 +00001174 // a) memcpy-memcpy xform which exposes redundancy for DSE.
1175 // b) call-memcpy xform for return slot optimization.
Nick Lewycky77d5fb42014-03-26 23:45:15 +00001176 // c) memcpy from freshly alloca'd space or space that has just started its
1177 // lifetime copies undefined data, and we can therefore eliminate the
1178 // memcpy in favor of the data that was already at the destination.
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001179 // d) memcpy from a just-memset'd source can be turned into memset.
Nick Lewycky0a7e9cc2011-10-16 20:13:32 +00001180 if (DepInfo.isClobber()) {
1181 if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
1182 if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
Pete Cooper67cf9a72015-11-19 05:56:52 +00001183 CopySize->getZExtValue(), M->getAlignment(),
Duncan Sandsc6ada692012-10-04 10:54:40 +00001184 C)) {
Nick Lewycky0a7e9cc2011-10-16 20:13:32 +00001185 MD->removeInstruction(M);
1186 M->eraseFromParent();
1187 return true;
1188 }
Chris Lattnerbc4457e2010-12-09 07:45:45 +00001189 }
Owen Andersonef9a6fd2008-04-09 08:23:16 +00001190 }
Ahmed Charles32e983e2012-02-13 06:30:56 +00001191
Chandler Carruthac80dc72015-06-17 07:18:54 +00001192 MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +00001193 MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
1194 SrcLoc, true, M->getIterator(), M->getParent());
Ahmed Bougachab6169662015-05-11 23:09:46 +00001195
Nick Lewycky0a7e9cc2011-10-16 20:13:32 +00001196 if (SrcDepInfo.isClobber()) {
1197 if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
Ahmed Bougacha15a31f62015-05-16 01:23:47 +00001198 return processMemCpyMemCpyDependence(M, MDep);
Nick Lewycky99384942014-02-06 06:29:19 +00001199 } else if (SrcDepInfo.isDef()) {
Nick Lewycky77d5fb42014-03-26 23:45:15 +00001200 Instruction *I = SrcDepInfo.getInst();
1201 bool hasUndefContents = false;
1202
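    // The copy reads undefined data when its source is a freshly created
    // alloca, or when the defining access is a lifetime.start covering at
    // least CopySize bytes; in either case the memcpy can simply be removed.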
1203 if (isa<AllocaInst>(I)) {
1204 hasUndefContents = true;
1205 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1206 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1207 if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
1208 if (LTSize->getZExtValue() >= CopySize->getZExtValue())
1209 hasUndefContents = true;
1210 }
1211
1212 if (hasUndefContents) {
Nick Lewycky99384942014-02-06 06:29:19 +00001213 MD->removeInstruction(M);
1214 M->eraseFromParent();
1215 ++NumMemCpyInstr;
1216 return true;
1217 }
Nick Lewycky0a7e9cc2011-10-16 20:13:32 +00001218 }
1219
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001220 if (SrcDepInfo.isClobber())
1221 if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
1222 if (performMemCpyToMemSetOptzn(M, MDep)) {
1223 MD->removeInstruction(M);
1224 M->eraseFromParent();
1225 ++NumCpyToSet;
1226 return true;
1227 }
1228
Owen Andersonad5367f2008-04-29 21:51:00 +00001229 return false;
Owen Andersonef9a6fd2008-04-09 08:23:16 +00001230}
1231
Sanjay Patela75c41e2015-08-13 22:53:20 +00001232/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
1233/// not to alias.
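///
/// A minimal sketch of the rewrite (names illustrative): once AA proves the
/// operands NoAlias, a call such as
/// \code
///   memmove(dst <- src, len)
/// \endcode
/// is turned in place into
/// \code
///   memcpy(dst <- src, len)
/// \endcode
/// by retargeting the call at the memcpy intrinsic, as done below.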
Sean Silva6347df02016-06-14 02:44:55 +00001234bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
1235 AliasAnalysis &AA = LookupAliasAnalysis();
Chris Lattner1145e332009-09-01 17:56:32 +00001236
Chris Lattner23f61a02011-05-01 18:27:11 +00001237 if (!TLI->has(LibFunc::memmove))
1238 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001239
Chris Lattner1145e332009-09-01 17:56:32 +00001240 // See if the pointers alias.
Chandler Carruth70c61c12015-06-04 02:03:15 +00001241 if (!AA.isNoAlias(MemoryLocation::getForDest(M),
1242 MemoryLocation::getForSource(M)))
Chris Lattner1145e332009-09-01 17:56:32 +00001243 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001244
Sean Silva6347df02016-06-14 02:44:55 +00001245 DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
1246 << "\n");
Nadav Rotem465834c2012-07-24 10:51:42 +00001247
Chris Lattner1145e332009-09-01 17:56:32 +00001248 // If not, then we know we can transform this.
Jay Foadb804a2b2011-07-12 14:06:48 +00001249 Type *ArgTys[3] = { M->getRawDest()->getType(),
1250 M->getRawSource()->getType(),
1251 M->getLength()->getType() };
Sanjay Patelaf674fb2015-12-14 17:24:23 +00001252 M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
1253 Intrinsic::memcpy, ArgTys));
Duncan Sands0edc7102009-09-03 13:37:16 +00001254
Chris Lattner1145e332009-09-01 17:56:32 +00001255 // MemDep may have overly conservative information about this instruction; just
1256 // conservatively flush it from the cache.
Chris Lattner58f9f582010-11-21 00:28:59 +00001257 MD->removeInstruction(M);
Duncan Sands0edc7102009-09-03 13:37:16 +00001258
1259 ++NumMoveToCpy;
Chris Lattner1145e332009-09-01 17:56:32 +00001260 return true;
1261}
Nadav Rotem465834c2012-07-24 10:51:42 +00001262
Sanjay Patela75c41e2015-08-13 22:53:20 +00001263/// This is called on every byval argument in call sites.
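///
/// A sketch of the transformation attempted here (names are illustrative):
/// \code
///   memcpy(a <- b)
///   foo(byval a)
/// \endcode
/// becomes
/// \code
///   memcpy(a <- b)   // frequently dead afterwards
///   foo(byval b)
/// \endcode
/// when 'b' is not modified between the copy and the call and the size and
/// alignment requirements checked below are met.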
Sean Silva6347df02016-06-14 02:44:55 +00001264bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
Mehdi Aminia28d91d2015-03-10 02:37:25 +00001265 const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
Chris Lattner59572292010-11-21 08:06:10 +00001266 // Find out what feeds this byval argument.
Chris Lattner58f9f582010-11-21 00:28:59 +00001267 Value *ByValArg = CS.getArgument(ArgNo);
Nick Lewyckyc585de62011-10-12 00:14:31 +00001268 Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
Mehdi Aminia28d91d2015-03-10 02:37:25 +00001269 uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
Chandler Carruthac80dc72015-06-17 07:18:54 +00001270 MemDepResult DepInfo = MD->getPointerDependencyFrom(
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +00001271 MemoryLocation(ByValArg, ByValSize), true,
1272 CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
Chris Lattner58f9f582010-11-21 00:28:59 +00001273 if (!DepInfo.isClobber())
1274 return false;
1275
1276 // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
1277 // a memcpy, see if we can pass the memcpy's source as the byval argument
1278 // instead of the copied result.
1279 MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
Craig Topperf40110f2014-04-25 05:29:35 +00001280 if (!MDep || MDep->isVolatile() ||
Chris Lattner58f9f582010-11-21 00:28:59 +00001281 ByValArg->stripPointerCasts() != MDep->getDest())
1282 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001283
Chris Lattner58f9f582010-11-21 00:28:59 +00001284 // The length of the memcpy must be larger than or equal to the size of the byval.
Chris Lattner58f9f582010-11-21 00:28:59 +00001285 ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
Craig Topperf40110f2014-04-25 05:29:35 +00001286 if (!C1 || C1->getValue().getZExtValue() < ByValSize)
Chris Lattner58f9f582010-11-21 00:28:59 +00001287 return false;
1288
Chris Lattner83791ce2011-05-23 00:03:39 +00001289 // Get the alignment of the byval. If the call doesn't specify the alignment,
1290 // then it is some target specific value that we can't know.
Chris Lattner58f9f582010-11-21 00:28:59 +00001291 unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
Chris Lattner83791ce2011-05-23 00:03:39 +00001292 if (ByValAlign == 0) return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001293
Chris Lattner83791ce2011-05-23 00:03:39 +00001294 // If it is greater than the memcpy, then we check to see if we can force the
1295 // source of the memcpy to the alignment we need. If we fail, we bail out.
Sean Silva6347df02016-06-14 02:44:55 +00001296 AssumptionCache &AC = LookupAssumptionCache();
1297 DominatorTree &DT = LookupDomTree();
Pete Cooper67cf9a72015-11-19 05:56:52 +00001298 if (MDep->getAlignment() < ByValAlign &&
Mehdi Aminia28d91d2015-03-10 02:37:25 +00001299 getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
1300 CS.getInstruction(), &AC, &DT) < ByValAlign)
Chris Lattner83791ce2011-05-23 00:03:39 +00001301 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001302
Chris Lattner58f9f582010-11-21 00:28:59 +00001303 // Verify that the copied-from memory doesn't change in between the memcpy and
1304 // the byval call.
1305 // memcpy(a <- b)
1306 // *b = 42;
1307 // foo(*a)
1308 // It would be invalid to transform the second memcpy into foo(*b).
Chris Lattner59572292010-11-21 08:06:10 +00001309 //
1310 // NOTE: This is conservative; it will stop on any read from the source loc,
1311 // not just the defining memcpy.
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +00001312 MemDepResult SourceDep = MD->getPointerDependencyFrom(
1313 MemoryLocation::getForSource(MDep), false,
1314 CS.getInstruction()->getIterator(), MDep->getParent());
Chris Lattner59572292010-11-21 08:06:10 +00001315 if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1316 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001317
Chris Lattner58f9f582010-11-21 00:28:59 +00001318 Value *TmpCast = MDep->getSource();
1319 if (MDep->getSource()->getType() != ByValArg->getType())
1320 TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
1321 "tmpcast", CS.getInstruction());
Nadav Rotem465834c2012-07-24 10:51:42 +00001322
Sean Silva6347df02016-06-14 02:44:55 +00001323 DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
Chris Lattner58f9f582010-11-21 00:28:59 +00001324 << " " << *MDep << "\n"
1325 << " " << *CS.getInstruction() << "\n");
Nadav Rotem465834c2012-07-24 10:51:42 +00001326
Chris Lattner58f9f582010-11-21 00:28:59 +00001327 // Otherwise we're good! Update the byval argument.
1328 CS.setArgument(ArgNo, TmpCast);
1329 ++NumMemCpyInstr;
1330 return true;
1331}
1332
Sean Silva6347df02016-06-14 02:44:55 +00001333/// Executes one iteration of MemCpyOptPass.
1334bool MemCpyOptPass::iterateOnFunction(Function &F) {
Chris Lattnerb5557a72009-09-01 17:09:55 +00001335 bool MadeChange = false;
Owen Andersonef9a6fd2008-04-09 08:23:16 +00001336
Chris Lattnerb5557a72009-09-01 17:09:55 +00001337 // Walk all instructions in the function.
Benjamin Kramer135f7352016-06-26 12:28:59 +00001338 for (BasicBlock &BB : F) {
1339 for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
Chris Lattnerb5557a72009-09-01 17:09:55 +00001340 // Avoid invalidating the iterator.
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +00001341 Instruction *I = &*BI++;
Nadav Rotem465834c2012-07-24 10:51:42 +00001342
Chris Lattner58f9f582010-11-21 00:28:59 +00001343 bool RepeatInstruction = false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001344
Owen Anderson6a7355c2008-04-21 07:45:10 +00001345 if (StoreInst *SI = dyn_cast<StoreInst>(I))
Chris Lattnerb5557a72009-09-01 17:09:55 +00001346 MadeChange |= processStore(SI, BI);
Chris Lattner9a1d63b2011-01-08 21:19:19 +00001347 else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
1348 RepeatInstruction = processMemSet(M, BI);
1349 else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
Tim Northover39617352016-05-10 21:49:40 +00001350 RepeatInstruction = processMemCpy(M);
Chris Lattner9a1d63b2011-01-08 21:19:19 +00001351 else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
Chris Lattner58f9f582010-11-21 00:28:59 +00001352 RepeatInstruction = processMemMove(M);
Benjamin Kramer3a09ef62015-04-10 14:50:08 +00001353 else if (auto CS = CallSite(I)) {
Chris Lattner58f9f582010-11-21 00:28:59 +00001354 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
Nick Lewycky612d70b2011-11-20 19:09:04 +00001355 if (CS.isByValArgument(i))
Chris Lattner58f9f582010-11-21 00:28:59 +00001356 MadeChange |= processByValArgument(CS, i);
1357 }
1358
1359 // Reprocess the instruction if desired.
1360 if (RepeatInstruction) {
Benjamin Kramer135f7352016-06-26 12:28:59 +00001361 if (BI != BB.begin())
1362 --BI;
Chris Lattner58f9f582010-11-21 00:28:59 +00001363 MadeChange = true;
Chris Lattner1145e332009-09-01 17:56:32 +00001364 }
Owen Andersonef9a6fd2008-04-09 08:23:16 +00001365 }
1366 }
Nadav Rotem465834c2012-07-24 10:51:42 +00001367
Chris Lattnerb5557a72009-09-01 17:09:55 +00001368 return MadeChange;
Owen Andersonef9a6fd2008-04-09 08:23:16 +00001369}
Chris Lattnerb5557a72009-09-01 17:09:55 +00001370
Sean Silva6347df02016-06-14 02:44:55 +00001371PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
Paul Robinsonaf4e64d2014-02-06 00:07:05 +00001372
Sean Silva6347df02016-06-14 02:44:55 +00001373 auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
1374 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1375
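  // The analysis lookups are wrapped in callbacks so the same runImpl can be
  // shared with the legacy pass manager path below; analyses are only fetched
  // when a transformation actually needs them. (Descriptive note only; this is
  // how the code below is structured, not a behavioral change.)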
1376 auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
1377 return AM.getResult<AAManager>(F);
1378 };
1379 auto LookupAssumptionCache = [&]() -> AssumptionCache & {
1380 return AM.getResult<AssumptionAnalysis>(F);
1381 };
1382 auto LookupDomTree = [&]() -> DominatorTree & {
1383 return AM.getResult<DominatorTreeAnalysis>(F);
1384 };
1385
1386 bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
1387 LookupAssumptionCache, LookupDomTree);
1388 if (!MadeChange)
1389 return PreservedAnalyses::all();
1390 PreservedAnalyses PA;
1391 PA.preserve<GlobalsAA>();
1392 PA.preserve<MemoryDependenceAnalysis>();
1393 return PA;
1394}
1395
1396bool MemCpyOptPass::runImpl(
1397 Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
1398 std::function<AliasAnalysis &()> LookupAliasAnalysis_,
1399 std::function<AssumptionCache &()> LookupAssumptionCache_,
1400 std::function<DominatorTree &()> LookupDomTree_) {
Chris Lattnerb5557a72009-09-01 17:09:55 +00001401 bool MadeChange = false;
Sean Silva6347df02016-06-14 02:44:55 +00001402 MD = MD_;
1403 TLI = TLI_;
Benjamin Kramer1afc1de2016-06-17 20:41:14 +00001404 LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
1405 LookupAssumptionCache = std::move(LookupAssumptionCache_);
1406 LookupDomTree = std::move(LookupDomTree_);
Nadav Rotem465834c2012-07-24 10:51:42 +00001407
Chris Lattner23f61a02011-05-01 18:27:11 +00001408 // If we don't have at least memset and memcpy, there is little point in doing
1409 // anything here. These are required by a freestanding implementation, so if
1410 // even they are disabled, there is no point in trying hard.
1411 if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
1412 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001413
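  // Iterate to a fixed point: each pass over the function may expose further
  // opportunities (e.g. forwarding one memcpy can make an earlier one dead),
  // so keep re-running iterateOnFunction until it reports no change.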
Chris Lattnerb5557a72009-09-01 17:09:55 +00001414 while (1) {
1415 if (!iterateOnFunction(F))
1416 break;
1417 MadeChange = true;
1418 }
Nadav Rotem465834c2012-07-24 10:51:42 +00001419
Craig Topperf40110f2014-04-25 05:29:35 +00001420 MD = nullptr;
Chris Lattnerb5557a72009-09-01 17:09:55 +00001421 return MadeChange;
1422}
Sean Silva6347df02016-06-14 02:44:55 +00001423
1424/// This is the main transformation entry point for a function.
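///
/// For manual testing, the legacy pass is normally exercised through opt with
/// something like the following (assuming the usual "memcpyopt" registration
/// name for this pass; a sketch, not taken from this file):
/// \code
///   opt -memcpyopt -S input.ll
/// \endcode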
1425bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
1426 if (skipFunction(F))
1427 return false;
1428
1429 auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
1430 auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1431
1432 auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
1433 return getAnalysis<AAResultsWrapperPass>().getAAResults();
1434 };
1435 auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
1436 return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1437 };
1438 auto LookupDomTree = [this]() -> DominatorTree & {
1439 return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1440 };
1441
1442 return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
1443 LookupDomTree);
1444}