//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

/// GetOffsetFromIndex - Return the constant byte offset implied by GEP's
/// indices starting at operand Idx.  If a non-constant index is seen, set
/// VariableIdxFound; the return value is then meaningless.
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound, const DataLayout &TD){
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr1 is provably equal to Ptr2 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40].  In this case offset would be -8.
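/// A minimal sketch of the same idea in (hypothetical) IR: with i32 elements,
/// %p1 below is %p2 plus 8 bytes, so Offset would be set to 8:
///   %p1 = getelementptr i32* %A, i64 2
///   %p2 = getelementptr i32* %A, i64 0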
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &TD) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();
  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After those, they may differ by some
  // constant offset, which determines their offset from each other.  At this
  // point, we handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &TD) const;

};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
  // If we found 4 or more stores to merge, or at least 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size.  If so, check to see whether we will end up actually reducing the
  // number of stores used.
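  //
  // A worked example of the heuristic below, assuming a target whose largest
  // legal integer type is 64 bits: a 16-byte range costs 2 wide stores and
  // 0 byte stores, so merging pays off only if it replaces 3 or more stores.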
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = TD.getLargestLegalIntTypeSize();
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores * MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  const DataLayout &TD;
public:
  MemsetRanges(const DataLayout &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);

};

} // end anon namespace


/// addRange - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
///
/// Do a linear search of the ranges to see if this can be joined and/or to
/// find the insertion point in the list.  We keep the ranges sorted for
/// simplicity here.  This is a linear search of a linked list, which is ugly,
/// however the number of ranges is limited, so this won't get crazy slow.
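///
/// For example (hypothetical offsets), given existing ranges [0, 4) and
/// [8, 12), adding a store covering [4, 8) first extends the first range to
/// [0, 8) and then folds the following range in, leaving a single [0, 12).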
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start, so the store overlaps
  // I but is not entirely contained within it.

  // See if this store extends the start of the range.  In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
    const DataLayout *TD;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = 0;
      TLI = 0;
      TD = 0;
    }

    bool runOnFunction(Function &F);

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfo>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
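///
/// A minimal sketch in (hypothetical) IR of what this catches: four adjacent
/// one-byte stores of the same splattable value, e.g.
///   store i8 0, i8* %P
///   %P1 = getelementptr i8* %P, i64 1
///   store i8 0, i8* %P1
///   ... (offsets 2 and 3) ...
/// can be replaced by a single
///   call void @llvm.memset.p0i8.i64(i8* %P, i8 0, i64 4, i32 1, i1 false)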
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  if (TD == 0) return 0;

  // Okay, so we now have a single store of a splattable value.  Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(*TD);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
                           Offset, *TD))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return 0;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  Instruction *AMemSet = 0;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (SmallVectorImpl<Instruction *>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}


bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  if (TD == 0) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = 0;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
        AliasAnalysis::Location StoreLoc = AA.getLocation(SI);
        for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
                                  E = C; I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
            C = 0;
            break;
          }
        }
      }

      if (C) {
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = TD->getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = TD->getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(LI,
                         SI->getPointerOperand()->stripPointerCasts(),
                         LI->getPointerOperand()->stripPointerCasts(),
                         TD->getTypeStoreSize(SI->getOperand(0)->getType()),
                         std::min(storeAlign, loadAlign), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }

  return false;
}

bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }
  return false;
}


/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.
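  //
  // In C-like pseudocode (illustrative only, assuming tmp is otherwise dead):
  //
  //   T tmp; f(&tmp); memcpy(&dst, &tmp, sizeof(T));
  //
  // becomes f(&dst); with both tmp and the copy eliminated.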

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  if (TD == 0) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    if (!StructTy->isSized()) {
      // The call may never return and hence the copy-instruction may never
      // be executed, and therefore it's not safe to say "the destination
      // has at least <cpyLen> bytes, as implied by the copy-instruction".
      return false;
    }

    uint64_t destSize = TD->getTypeAllocSize(StructTy);
    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  AliasAnalysis::ModRefResult MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != AliasAnalysis::NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
///
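/// A sketch of the goal (hypothetical operands): given
///   memcpy(b <- a)
///   memcpy(c <- b)
/// rewrite the second copy as memcpy(c <- a), so the intermediate buffer 'b'
/// (and eventually the first memcpy) can be eliminated by later passes.
///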
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // The lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}


/// processMemCpy - perform simplification of memcpys.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpys with a constant size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
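  // For illustration only (hypothetical IR): with @G a constant global whose
  // initializer is all zero bytes,
  //   memcpy(%dst <- @G, 16)
  // can be rewritten as memset(%dst, 0, 16).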
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult DepInfo = MD->getDependency(M);
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  AliasAnalysis::Location SrcLoc = AliasAnalysis::getLocationForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
                                                         M, M->getParent());
  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
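///
/// For illustration only (hypothetical IR): when AA proves that %d and %s in
///   call void @llvm.memmove.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i32 1, i1 false)
/// cannot alias, the call is rewritten to the corresponding @llvm.memcpy,
/// which later passes can optimize more aggressively.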
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
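///
/// A sketch of the transformation (hypothetical IR): if the byval argument is
/// fed only by
///   memcpy(%tmp <- %src)
///   call void @f(i8* byval %tmp)
/// and %src is not modified in between, the call can take %src directly and
/// the temporary (plus its memcpy) may later be removed.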
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  if (TD == 0) return false;

  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need.  If we fail, we bail out.
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, TD) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TD = getAnalysisIfAvailable<DataLayout>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  // If we don't have at least memset and memcpy, there is little point in doing
  // anything here.  These are required by a freestanding implementation, so if
  // even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}