//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
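// For example, a run of adjacent stores of the same byte value, such as
//   A[0] = 0; A[1] = 0; A[2] = 0; A[3] = 0;
// can be collapsed into a single call to memset.
//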
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
/// return that constant offset. For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that, each may have some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A semi-open range describing the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge or a span of at least 16 bytes, use
  // memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size.  If so, check to see whether we will end up actually reducing the
  // number of stores used.
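  // For instance, with an 8-byte largest legal integer, a 9-byte range lowers
  // to one 8-byte store plus one byte store, so merging only pays off here if
  // it replaces more than two of the original stores.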
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
      [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away.  In particular, this looks for stores to neighboring locations of
/// memory.  If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a splattable value.  Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put each one right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    LLVM_DEBUG(dbgs() << "Replace stores:\n";
               for (Instruction *SI : Range.TheStores)
                 dbgs() << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

static unsigned findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  return StoreAlign;
}

static unsigned findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());
  return LoadAlign;
}

static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = findStoreAlignment(DL, SI);
  unsigned LoadAlign = findLoadAlignment(DL, LI);
  return MinAlign(StoreAlign, LoadAlign);
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its operands, plus anything else that
// may alias with them.
// The method returns true if it was successful.
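// For example, if SI stores through a GEP computed between P and SI, that GEP
// (and, transitively, its operands defined in the same block) must be lifted
// above P as well before the store can be placed there.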
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
                   const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted callsites.
  SmallVector<ImmutableCallSite, 8> CallSites;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
        return isModOrRefSet(AA.getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift =
            llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
              return isModOrRefSet(AA.getModRefInfo(C, CS));
            });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA.getModRefInfo(C, LoadLoc)))
        return false;
      else if (auto CS = ImmutableCallSite(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA.getModRefInfo(P, CS)))
          return false;

        CallSites.push_back(CS);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA.getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
        if (A->getParent() == SI->getParent())
          Args.insert(A);
  }

  // We made it; now lift everything we collected.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach the pass how to propagate the !nontemporal
  // metadata to memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this point and
        // the store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve the semantics. If not, memcpy
          // can be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), findStoreAlignment(DL, SI),
                LI->getPointerOperand(), findLoadAlignment(DL, LI), Size,
                SI->isVolatile());
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), findStoreAlignment(DL, SI),
                LI->getPointerOperand(), findLoadAlignment(DL, LI), Size,
                SI->isVolatile());

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
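      // Schematically (a sketch, not exact IR), a sequence such as:
      //   %tmp = alloca %T
      //   call void @f(%T* %tmp)   ; @f writes its result into %tmp
      //   %v = load %T, %T* %tmp
      //   store %T %v, %T* %dst
      // may be rewritten so that @f writes directly into %dst, dropping the
      // temporary, if the checks in performCallSlotOptzn succeed.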
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
        bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
            C = nullptr;
            break;
          }
          // The store to dest may never happen if an exception can be thrown
          // between the load and the store.
          if (I->mayThrow() && !CpyDestIsLocal) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            findCommonAlignment(DL, SI, LI), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      unsigned Align = SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal,
                                     Size, Align, SI->isVolatile());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         unsigned cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // The store to dest may never happen if the call can throw.
    if (C->mayThrow())
      return false;

    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = LookupDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = LookupAliasAnalysis();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
        CS.getArgument(i)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group};
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

Sanjay Patela75c41e2015-08-13 22:53:20 +00001006/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
1007/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
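/// For example (illustrative):
/// \code
///   memcpy(b <- a)
///   memcpy(c <- b)
/// \endcode
/// can be rewritten as
/// \code
///   memcpy(b <- a)
///   memcpy(c <- a)
/// \endcode
/// so that later passes may be able to remove the first memcpy entirely.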
Sean Silva6347df02016-06-14 02:44:55 +00001008bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
1009 MemCpyInst *MDep) {
Chris Lattner7e9b2ea2010-11-18 07:02:37 +00001010 // We can only transform memcpys where the dest of one is the source of the
1011 // other.
Chris Lattner58f9f582010-11-21 00:28:59 +00001012 if (M->getSource() != MDep->getDest() || MDep->isVolatile())
Chris Lattner7e9b2ea2010-11-18 07:02:37 +00001013 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001014
Chris Lattnerfd51c522010-12-09 07:39:50 +00001015 // If the dep instruction is reading from our current input, then it is a noop
1016 // transfer and substituting the input won't change this instruction. Just
1017 // ignore the input and let someone else zap MDep. This handles cases like:
1018 // memcpy(a <- a)
1019 // memcpy(b <- a)
1020 if (M->getSource() == MDep->getSource())
1021 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001022
Chris Lattner0ab5e2c2011-04-15 05:18:47 +00001023 // Second, the lengths of the two memcpys must be the same, or the preceding one
Chris Lattner7e9b2ea2010-11-18 07:02:37 +00001024 // must be larger than the following one.
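  // For example, it is fine if MDep copied 64 bytes and M copies only 16 of
  // them, but not the other way around.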
Dan Gohman19e30d52011-01-21 22:07:57 +00001025 ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
1026 ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
1027 if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
1028 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001029
Sean Silva6347df02016-06-14 02:44:55 +00001030 AliasAnalysis &AA = LookupAliasAnalysis();
Chris Lattner59572292010-11-21 08:06:10 +00001031
1032 // Verify that the copied-from memory doesn't change in between the two
1033 // transfers. For example, in:
1034 // memcpy(a <- b)
1035 // *b = 42;
1036 // memcpy(c <- a)
1037 // It would be invalid to transform the second memcpy into memcpy(c <- b).
1038 //
1039 // TODO: If the code between M and MDep is transparent to the destination "c",
1040 // then we could still perform the xform by moving M up to the first memcpy.
1041 //
1042 // NOTE: This is conservative, it will stop on any read from the source loc,
1043 // not just the defining memcpy.
Reid Kleckner6d310012017-12-28 05:10:33 +00001044 MemDepResult SourceDep =
1045 MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
1046 M->getIterator(), M->getParent());
Chris Lattner59572292010-11-21 08:06:10 +00001047 if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1048 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001049
Chris Lattner731caac2010-11-18 08:00:57 +00001050 // If the dest of the second might alias the source of the first, then the
1051 // source and dest might overlap. We still want to eliminate the intermediate
1052 // value, but we have to generate a memmove instead of memcpy.
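  // For example (illustrative), with memcpy(b <- a) followed by memcpy(a+1 <- b),
  // the rewritten copy (a+1 <- a) overlaps and must be emitted as a memmove.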
Chris Lattner6cf8d6c2010-12-26 22:57:41 +00001053 bool UseMemMove = false;
Chandler Carruth70c61c12015-06-04 02:03:15 +00001054 if (!AA.isNoAlias(MemoryLocation::getForDest(M),
1055 MemoryLocation::getForSource(MDep)))
Chris Lattner6cf8d6c2010-12-26 22:57:41 +00001056 UseMemMove = true;
Nadav Rotem465834c2012-07-24 10:51:42 +00001057
Chris Lattner58f9f582010-11-21 00:28:59 +00001058 // If all checks passed, then we can transform M.
Nadav Rotem465834c2012-07-24 10:51:42 +00001059
Chris Lattner7e9b2ea2010-11-18 07:02:37 +00001060 // TODO: Is this worth it if we're creating a less aligned memcpy? For
1061 // example we could be moving from movaps -> movq on x86.
Chris Lattner6cf8d6c2010-12-26 22:57:41 +00001062 IRBuilder<> Builder(M);
1063 if (UseMemMove)
Daniel Neilson6f1eb582018-03-21 14:14:55 +00001064 Builder.CreateMemMove(M->getRawDest(), M->getDestAlignment(),
1065 MDep->getRawSource(), MDep->getSourceAlignment(),
1066 M->getLength(), M->isVolatile());
Chris Lattner6cf8d6c2010-12-26 22:57:41 +00001067 else
Daniel Neilson6f1eb582018-03-21 14:14:55 +00001068 Builder.CreateMemCpy(M->getRawDest(), M->getDestAlignment(),
1069 MDep->getRawSource(), MDep->getSourceAlignment(),
1070 M->getLength(), M->isVolatile());
Chris Lattner1385dff2010-11-18 08:07:09 +00001071
Chris Lattner59572292010-11-21 08:06:10 +00001072 // Remove the instruction we're replacing.
Chris Lattner58f9f582010-11-21 00:28:59 +00001073 MD->removeInstruction(M);
Chris Lattner1385dff2010-11-18 08:07:09 +00001074 M->eraseFromParent();
1075 ++NumMemCpyInstr;
1076 return true;
Chris Lattner7e9b2ea2010-11-18 07:02:37 +00001077}
1078
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001079/// We've found that the (upward scanning) memory dependence of \p MemCpy is
1080/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
1081/// weren't copied over by \p MemCpy.
1082///
1083/// In other words, transform:
1084/// \code
1085/// memset(dst, c, dst_size);
1086/// memcpy(dst, src, src_size);
1087/// \endcode
1088/// into:
1089/// \code
1090/// memcpy(dst, src, src_size);
1091/// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
1092/// \endcode
Sean Silva6347df02016-06-14 02:44:55 +00001093bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
1094 MemSetInst *MemSet) {
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001095 // We can only transform memset/memcpy with the same destination.
1096 if (MemSet->getDest() != MemCpy->getDest())
1097 return false;
1098
Ahmed Bougacha97876fa2015-05-21 01:43:39 +00001099 // Check that there are no other dependencies on the memset destination.
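  // In particular, nothing may read or write the memset destination in between,
  // since the memset is effectively shrunk and sunk down to the memcpy.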
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +00001100 MemDepResult DstDepInfo =
1101 MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
1102 MemCpy->getIterator(), MemCpy->getParent());
Ahmed Bougacha97876fa2015-05-21 01:43:39 +00001103 if (DstDepInfo.getInst() != MemSet)
1104 return false;
1105
Ahmed Bougacha9692e302015-04-21 21:28:33 +00001106 // Use the same i8* dest as the memcpy, killing the memset dest if different.
1107 Value *Dest = MemCpy->getRawDest();
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001108 Value *DestSize = MemSet->getLength();
1109 Value *SrcSize = MemCpy->getLength();
1110
1111 // By default, create an unaligned memset.
1112 unsigned Align = 1;
1113 // If Dest is aligned and SrcSize is constant, the trailing memset starts at
1114 // Dest + SrcSize, so use the minimum alignment of the two.
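  // For example (illustrative), with a 16-byte aligned Dest and SrcSize == 24,
  // the tail at Dest + SrcSize is only guaranteed to be 8-byte aligned.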
1115 const unsigned DestAlign =
Daniel Neilson6f1eb582018-03-21 14:14:55 +00001116 std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001117 if (DestAlign > 1)
1118 if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
1119 Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
1120
Ahmed Bougacha97876fa2015-05-21 01:43:39 +00001121 IRBuilder<> Builder(MemCpy);
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001122
Ahmed Bougacha05b72c12015-04-18 23:06:04 +00001123 // If the sizes have different types, zext the smaller one.
Ahmed Bougacha7216ccc2015-04-18 17:57:41 +00001124 if (DestSize->getType() != SrcSize->getType()) {
Ahmed Bougacha05b72c12015-04-18 23:06:04 +00001125 if (DestSize->getType()->getIntegerBitWidth() >
1126 SrcSize->getType()->getIntegerBitWidth())
1127 SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
1128 else
1129 DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
Ahmed Bougacha7216ccc2015-04-18 17:57:41 +00001130 }
1131
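  // Compute the length of the trailing memset: zero if the copy already covers
  // the whole memset destination, otherwise dst_size - src_size.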
Benjamin Kramer1697d392016-11-07 17:47:28 +00001132 Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
1133 Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
1134 Value *MemsetLen = Builder.CreateSelect(
1135 Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001136 Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
1137 MemsetLen, Align);
1138
1139 MD->removeInstruction(MemSet);
1140 MemSet->eraseFromParent();
1141 return true;
1142}
Chris Lattner7e9b2ea2010-11-18 07:02:37 +00001143
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001144/// Transform memcpy to memset when its source was just memset.
1145/// In other words, turn:
1146/// \code
1147/// memset(dst1, c, dst1_size);
1148/// memcpy(dst2, dst1, dst2_size);
1149/// \endcode
1150/// into:
1151/// \code
1152/// memset(dst1, c, dst1_size);
1153/// memset(dst2, c, dst2_size);
1154/// \endcode
1155/// When dst2_size <= dst1_size.
1156///
1157/// The \p MemCpy must have a Constant length.
Sean Silva6347df02016-06-14 02:44:55 +00001158bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
1159 MemSetInst *MemSet) {
Tim Shena3dbead2016-08-25 19:27:26 +00001160 AliasAnalysis &AA = LookupAliasAnalysis();
1161
Tim Shen3ad8b432016-08-25 21:03:46 +00001162 // Make sure this is memcpy(..., memset(...), ...); that is, we are memsetting
1163 // and then memcpying from the same address. Otherwise it is hard to reason about.
Tim Shena3dbead2016-08-25 19:27:26 +00001164 if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001165 return false;
1166
1167 ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
1168 ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
1169 // Make sure the memcpy doesn't read any more than what the memset wrote.
1170 // Don't worry about sizes larger than i64.
1171 if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
1172 return false;
1173
Ahmed Bougacha0541c672015-05-21 00:08:35 +00001174 IRBuilder<> Builder(MemCpy);
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001175 Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
Daniel Neilson6f1eb582018-03-21 14:14:55 +00001176 CopySize, MemCpy->getDestAlignment());
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001177 return true;
1178}
1179
Sanjay Patela75c41e2015-08-13 22:53:20 +00001180/// Perform simplification of memcpy's. If we have memcpy A
Gabor Greif62f0aac2010-07-28 22:50:26 +00001181/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
1182/// B to be a memcpy from X to Z (or potentially a memmove, depending on
1183/// circumstances). This allows later passes to remove the first memcpy
1184/// altogether.
Sean Silva6347df02016-06-14 02:44:55 +00001185bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
Nick Lewycky00703e72014-02-04 00:18:54 +00001186 // We can only optimize non-volatile memcpy's.
1187 if (M->isVolatile()) return false;
Owen Anderson18e4fed2010-10-15 22:52:12 +00001188
Chris Lattnerbc4457e2010-12-09 07:45:45 +00001189 // If the source and destination of the memcpy are the same, then zap it.
1190 if (M->getSource() == M->getDest()) {
1191 MD->removeInstruction(M);
1192 M->eraseFromParent();
1193 return false;
1194 }
Benjamin Kramerea9152e2010-12-24 21:17:12 +00001195
1196 // If copying from a constant, try to turn the memcpy into a memset.
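  // For example (illustrative), a copy from a constant global whose initializer
  // is the same byte repeated (say eight 0xAA bytes) can instead memset the
  // destination with 0xAA.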
Benjamin Kramerb90b2f02010-12-24 22:23:59 +00001197 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
Benjamin Kramer30342fb2010-12-26 15:23:45 +00001198 if (GV->isConstant() && GV->hasDefinitiveInitializer())
Benjamin Kramerb90b2f02010-12-24 22:23:59 +00001199 if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
Chris Lattner6cf8d6c2010-12-26 22:57:41 +00001200 IRBuilder<> Builder(M);
Nick Lewycky00703e72014-02-04 00:18:54 +00001201 Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
Daniel Neilson6f1eb582018-03-21 14:14:55 +00001202 M->getDestAlignment(), false);
Benjamin Kramerb90b2f02010-12-24 22:23:59 +00001203 MD->removeInstruction(M);
1204 M->eraseFromParent();
1205 ++NumCpyToSet;
1206 return true;
1207 }
Benjamin Kramerea9152e2010-12-24 21:17:12 +00001208
Ahmed Bougachab6169662015-05-11 23:09:46 +00001209 MemDepResult DepInfo = MD->getDependency(M);
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001210
1211 // Try to turn a partially redundant memset + memcpy into
1212 // memcpy + smaller memset. We don't need the memcpy size for this.
Ahmed Bougachab6169662015-05-11 23:09:46 +00001213 if (DepInfo.isClobber())
1214 if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
Ahmed Bougacha83f78a42015-04-17 22:20:57 +00001215 if (processMemSetMemCpyDependence(M, MDep))
1216 return true;
1217
Nick Lewycky00703e72014-02-04 00:18:54 +00001218 // The optimizations after this point require the memcpy size.
1219 ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
Craig Topperf40110f2014-04-25 05:29:35 +00001220 if (!CopySize) return false;
Nick Lewycky00703e72014-02-04 00:18:54 +00001221
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001222 // There are four possible optimizations we can do for memcpy:
Chris Lattnerb5557a72009-09-01 17:09:55 +00001223 // a) memcpy-memcpy xform which exposes redundancy for DSE.
1224 // b) call-memcpy xform for return slot optimization.
Nick Lewycky77d5fb42014-03-26 23:45:15 +00001225 // c) memcpy from freshly alloca'd space or space that has just started its
1226 // lifetime copies undefined data, and we can therefore eliminate the
1227 // memcpy in favor of the data that was already at the destination.
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001228 // d) memcpy from a just-memset'd source can be turned into memset.
Nick Lewycky0a7e9cc2011-10-16 20:13:32 +00001229 if (DepInfo.isClobber()) {
1230 if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
Daniel Neilson6f1eb582018-03-21 14:14:55 +00001231 // FIXME: Can we pass in either of dest/src alignment here instead
1232 // of conservatively taking the minimum?
1233 unsigned Align = MinAlign(M->getDestAlignment(), M->getSourceAlignment());
Nick Lewycky0a7e9cc2011-10-16 20:13:32 +00001234 if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
Daniel Neilson6f1eb582018-03-21 14:14:55 +00001235 CopySize->getZExtValue(), Align,
Duncan Sandsc6ada692012-10-04 10:54:40 +00001236 C)) {
Nick Lewycky0a7e9cc2011-10-16 20:13:32 +00001237 MD->removeInstruction(M);
1238 M->eraseFromParent();
1239 return true;
1240 }
Chris Lattnerbc4457e2010-12-09 07:45:45 +00001241 }
Owen Andersonef9a6fd2008-04-09 08:23:16 +00001242 }
Ahmed Charles32e983e2012-02-13 06:30:56 +00001243
Chandler Carruthac80dc72015-06-17 07:18:54 +00001244 MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +00001245 MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
1246 SrcLoc, true, M->getIterator(), M->getParent());
Ahmed Bougachab6169662015-05-11 23:09:46 +00001247
Nick Lewycky0a7e9cc2011-10-16 20:13:32 +00001248 if (SrcDepInfo.isClobber()) {
1249 if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
Ahmed Bougacha15a31f62015-05-16 01:23:47 +00001250 return processMemCpyMemCpyDependence(M, MDep);
Nick Lewycky99384942014-02-06 06:29:19 +00001251 } else if (SrcDepInfo.isDef()) {
Nick Lewycky77d5fb42014-03-26 23:45:15 +00001252 Instruction *I = SrcDepInfo.getInst();
1253 bool hasUndefContents = false;
1254
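    // The copied-from bytes are undefined if the source is a fresh alloca, or if
    // a lifetime.start covering at least the copied size has just begun.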
1255 if (isa<AllocaInst>(I)) {
1256 hasUndefContents = true;
1257 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1258 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1259 if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
1260 if (LTSize->getZExtValue() >= CopySize->getZExtValue())
1261 hasUndefContents = true;
1262 }
1263
1264 if (hasUndefContents) {
Nick Lewycky99384942014-02-06 06:29:19 +00001265 MD->removeInstruction(M);
1266 M->eraseFromParent();
1267 ++NumMemCpyInstr;
1268 return true;
1269 }
Nick Lewycky0a7e9cc2011-10-16 20:13:32 +00001270 }
1271
Ahmed Bougachaf8fa3b82015-05-16 01:32:26 +00001272 if (SrcDepInfo.isClobber())
1273 if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
1274 if (performMemCpyToMemSetOptzn(M, MDep)) {
1275 MD->removeInstruction(M);
1276 M->eraseFromParent();
1277 ++NumCpyToSet;
1278 return true;
1279 }
1280
Owen Andersonad5367f2008-04-29 21:51:00 +00001281 return false;
Owen Andersonef9a6fd2008-04-09 08:23:16 +00001282}
1283
Sanjay Patela75c41e2015-08-13 22:53:20 +00001284/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
1285/// not to alias.
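/// For example (illustrative), memmove(a <- b) becomes memcpy(a <- b) once AA
/// proves that the source and destination ranges cannot overlap.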
Sean Silva6347df02016-06-14 02:44:55 +00001286bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
1287 AliasAnalysis &AA = LookupAliasAnalysis();
Chris Lattner1145e332009-09-01 17:56:32 +00001288
David L. Jonesd21529f2017-01-23 23:16:46 +00001289 if (!TLI->has(LibFunc_memmove))
Chris Lattner23f61a02011-05-01 18:27:11 +00001290 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001291
Chris Lattner1145e332009-09-01 17:56:32 +00001292 // See if the pointers alias.
Chandler Carruth70c61c12015-06-04 02:03:15 +00001293 if (!AA.isNoAlias(MemoryLocation::getForDest(M),
1294 MemoryLocation::getForSource(M)))
Chris Lattner1145e332009-09-01 17:56:32 +00001295 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001296
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001297 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
1298 << "\n");
Nadav Rotem465834c2012-07-24 10:51:42 +00001299
Chris Lattner1145e332009-09-01 17:56:32 +00001300 // If not, then we know we can transform this.
Jay Foadb804a2b2011-07-12 14:06:48 +00001301 Type *ArgTys[3] = { M->getRawDest()->getType(),
1302 M->getRawSource()->getType(),
1303 M->getLength()->getType() };
Sanjay Patelaf674fb2015-12-14 17:24:23 +00001304 M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
1305 Intrinsic::memcpy, ArgTys));
Duncan Sands0edc7102009-09-03 13:37:16 +00001306
Chris Lattner1145e332009-09-01 17:56:32 +00001307 // MemDep may have overly conservative information about this instruction; just
1308 // conservatively flush it from the cache.
Chris Lattner58f9f582010-11-21 00:28:59 +00001309 MD->removeInstruction(M);
Duncan Sands0edc7102009-09-03 13:37:16 +00001310
1311 ++NumMoveToCpy;
Chris Lattner1145e332009-09-01 17:56:32 +00001312 return true;
1313}
Nadav Rotem465834c2012-07-24 10:51:42 +00001314
Sanjay Patela75c41e2015-08-13 22:53:20 +00001315/// This is called on every byval argument in call sites.
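/// If the byval argument is fed by a memcpy from another buffer, the call can
/// often read from that buffer directly. For example (illustrative), given
/// \code
///   memcpy(a <- b)
///   foo(byval a)
/// \endcode
/// we can pass 'b' instead when 'b' is not modified in between:
/// \code
///   memcpy(a <- b)
///   foo(byval b)
/// \endcode
/// which frequently leaves the memcpy dead.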
Sean Silva6347df02016-06-14 02:44:55 +00001316bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
Mehdi Aminia28d91d2015-03-10 02:37:25 +00001317 const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
Chris Lattner59572292010-11-21 08:06:10 +00001318 // Find out what feeds this byval argument.
Chris Lattner58f9f582010-11-21 00:28:59 +00001319 Value *ByValArg = CS.getArgument(ArgNo);
Nick Lewyckyc585de62011-10-12 00:14:31 +00001320 Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
Mehdi Aminia28d91d2015-03-10 02:37:25 +00001321 uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
Chandler Carruthac80dc72015-06-17 07:18:54 +00001322 MemDepResult DepInfo = MD->getPointerDependencyFrom(
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +00001323 MemoryLocation(ByValArg, ByValSize), true,
1324 CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
Chris Lattner58f9f582010-11-21 00:28:59 +00001325 if (!DepInfo.isClobber())
1326 return false;
1327
1328 // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
1329 // a memcpy, see if we can byval from the source of the memcpy instead of the
1330 // result.
1331 MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
Craig Topperf40110f2014-04-25 05:29:35 +00001332 if (!MDep || MDep->isVolatile() ||
Chris Lattner58f9f582010-11-21 00:28:59 +00001333 ByValArg->stripPointerCasts() != MDep->getDest())
1334 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001335
Chris Lattner58f9f582010-11-21 00:28:59 +00001336 // The length of the memcpy must be larger than or equal to the size of the byval.
Chris Lattner58f9f582010-11-21 00:28:59 +00001337 ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
Craig Topperf40110f2014-04-25 05:29:35 +00001338 if (!C1 || C1->getValue().getZExtValue() < ByValSize)
Chris Lattner58f9f582010-11-21 00:28:59 +00001339 return false;
1340
Chris Lattner83791ce2011-05-23 00:03:39 +00001341 // Get the alignment of the byval. If the call doesn't specify the alignment,
1342 // then it is some target-specific value that we can't know.
Reid Kleckner859f8b52017-04-28 20:34:27 +00001343 unsigned ByValAlign = CS.getParamAlignment(ArgNo);
Chris Lattner83791ce2011-05-23 00:03:39 +00001344 if (ByValAlign == 0) return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001345
Chris Lattner83791ce2011-05-23 00:03:39 +00001346 // If it is greater than the memcpy, then we check to see if we can force the
1347 // source of the memcpy to the alignment we need. If we fail, we bail out.
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001348 AssumptionCache &AC = LookupAssumptionCache();
Sean Silva6347df02016-06-14 02:44:55 +00001349 DominatorTree &DT = LookupDomTree();
Daniel Neilson6f1eb582018-03-21 14:14:55 +00001350 if (MDep->getSourceAlignment() < ByValAlign &&
Mehdi Aminia28d91d2015-03-10 02:37:25 +00001351 getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001352 CS.getInstruction(), &AC, &DT) < ByValAlign)
Chris Lattner83791ce2011-05-23 00:03:39 +00001353 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001354
Matt Arsenaultdaa08872017-04-10 19:00:25 +00001355 // The address space of the memcpy source must match the byval argument
1356 if (MDep->getSource()->getType()->getPointerAddressSpace() !=
1357 ByValArg->getType()->getPointerAddressSpace())
1358 return false;
1359
Chris Lattner58f9f582010-11-21 00:28:59 +00001360 // Verify that the copied-from memory doesn't change in between the memcpy and
1361 // the byval call.
1362 // memcpy(a <- b)
1363 // *b = 42;
1364 // foo(*a)
1365 // It would be invalid to transform the second memcpy into foo(*b).
Chris Lattner59572292010-11-21 08:06:10 +00001366 //
1367 // NOTE: This is conservative, it will stop on any read from the source loc,
1368 // not just the defining memcpy.
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +00001369 MemDepResult SourceDep = MD->getPointerDependencyFrom(
1370 MemoryLocation::getForSource(MDep), false,
1371 CS.getInstruction()->getIterator(), MDep->getParent());
Chris Lattner59572292010-11-21 08:06:10 +00001372 if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1373 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001374
Chris Lattner58f9f582010-11-21 00:28:59 +00001375 Value *TmpCast = MDep->getSource();
1376 if (MDep->getSource()->getType() != ByValArg->getType())
1377 TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
1378 "tmpcast", CS.getInstruction());
Nadav Rotem465834c2012-07-24 10:51:42 +00001379
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001380 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
1381 << " " << *MDep << "\n"
1382 << " " << *CS.getInstruction() << "\n");
Nadav Rotem465834c2012-07-24 10:51:42 +00001383
Chris Lattner58f9f582010-11-21 00:28:59 +00001384 // Otherwise we're good! Update the byval argument.
1385 CS.setArgument(ArgNo, TmpCast);
1386 ++NumMemCpyInstr;
1387 return true;
1388}
1389
Sean Silva6347df02016-06-14 02:44:55 +00001390/// Executes one iteration of MemCpyOptPass.
1391bool MemCpyOptPass::iterateOnFunction(Function &F) {
Chris Lattnerb5557a72009-09-01 17:09:55 +00001392 bool MadeChange = false;
Owen Andersonef9a6fd2008-04-09 08:23:16 +00001393
Bjorn Pettersson8e484dc2018-04-23 19:55:04 +00001394 DominatorTree &DT = LookupDomTree();
1395
Chris Lattnerb5557a72009-09-01 17:09:55 +00001396 // Walk all instruction in the function.
Benjamin Kramer135f7352016-06-26 12:28:59 +00001397 for (BasicBlock &BB : F) {
Bjorn Pettersson8e484dc2018-04-23 19:55:04 +00001398 // Skip unreachable blocks. For example, processStore assumes that an
1399 // instruction in a BB can't be dominated by a later instruction in the
1400 // same BB (which is a scenario that can happen for an unreachable BB that
1401 // has itself as a predecessor).
1402 if (!DT.isReachableFromEntry(&BB))
1403 continue;
1404
Benjamin Kramer135f7352016-06-26 12:28:59 +00001405 for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
Bjorn Pettersson8e484dc2018-04-23 19:55:04 +00001406 // Avoid invalidating the iterator.
Duncan P. N. Exon Smithbe4d8cb2015-10-13 19:26:58 +00001407 Instruction *I = &*BI++;
Nadav Rotem465834c2012-07-24 10:51:42 +00001408
Chris Lattner58f9f582010-11-21 00:28:59 +00001409 bool RepeatInstruction = false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001410
Owen Anderson6a7355c2008-04-21 07:45:10 +00001411 if (StoreInst *SI = dyn_cast<StoreInst>(I))
Chris Lattnerb5557a72009-09-01 17:09:55 +00001412 MadeChange |= processStore(SI, BI);
Chris Lattner9a1d63b2011-01-08 21:19:19 +00001413 else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
1414 RepeatInstruction = processMemSet(M, BI);
1415 else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
Tim Northover39617352016-05-10 21:49:40 +00001416 RepeatInstruction = processMemCpy(M);
Chris Lattner9a1d63b2011-01-08 21:19:19 +00001417 else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
Chris Lattner58f9f582010-11-21 00:28:59 +00001418 RepeatInstruction = processMemMove(M);
Benjamin Kramer3a09ef62015-04-10 14:50:08 +00001419 else if (auto CS = CallSite(I)) {
Chris Lattner58f9f582010-11-21 00:28:59 +00001420 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
Nick Lewycky612d70b2011-11-20 19:09:04 +00001421 if (CS.isByValArgument(i))
Chris Lattner58f9f582010-11-21 00:28:59 +00001422 MadeChange |= processByValArgument(CS, i);
1423 }
1424
1425 // Reprocess the instruction if desired.
1426 if (RepeatInstruction) {
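        // Step the iterator back so the next loop iteration revisits the
        // position of the instruction we just transformed.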
Benjamin Kramer135f7352016-06-26 12:28:59 +00001427 if (BI != BB.begin())
1428 --BI;
Chris Lattner58f9f582010-11-21 00:28:59 +00001429 MadeChange = true;
Chris Lattner1145e332009-09-01 17:56:32 +00001430 }
Owen Andersonef9a6fd2008-04-09 08:23:16 +00001431 }
1432 }
Nadav Rotem465834c2012-07-24 10:51:42 +00001433
Chris Lattnerb5557a72009-09-01 17:09:55 +00001434 return MadeChange;
Owen Andersonef9a6fd2008-04-09 08:23:16 +00001435}
Chris Lattnerb5557a72009-09-01 17:09:55 +00001436
Sean Silva6347df02016-06-14 02:44:55 +00001437PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
Sean Silva6347df02016-06-14 02:44:55 +00001438 auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
1439 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1440
1441 auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
1442 return AM.getResult<AAManager>(F);
1443 };
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001444 auto LookupAssumptionCache = [&]() -> AssumptionCache & {
1445 return AM.getResult<AssumptionAnalysis>(F);
1446 };
Sean Silva6347df02016-06-14 02:44:55 +00001447 auto LookupDomTree = [&]() -> DominatorTree & {
1448 return AM.getResult<DominatorTreeAnalysis>(F);
1449 };
1450
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001451 bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
1452 LookupAssumptionCache, LookupDomTree);
Sean Silva6347df02016-06-14 02:44:55 +00001453 if (!MadeChange)
1454 return PreservedAnalyses::all();
Chandler Carruthca68a3e2017-01-15 06:32:49 +00001455
Sean Silva6347df02016-06-14 02:44:55 +00001456 PreservedAnalyses PA;
Chandler Carruthca68a3e2017-01-15 06:32:49 +00001457 PA.preserveSet<CFGAnalyses>();
Sean Silva6347df02016-06-14 02:44:55 +00001458 PA.preserve<GlobalsAA>();
1459 PA.preserve<MemoryDependenceAnalysis>();
1460 return PA;
1461}
1462
1463bool MemCpyOptPass::runImpl(
1464 Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
1465 std::function<AliasAnalysis &()> LookupAliasAnalysis_,
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001466 std::function<AssumptionCache &()> LookupAssumptionCache_,
Sean Silva6347df02016-06-14 02:44:55 +00001467 std::function<DominatorTree &()> LookupDomTree_) {
Chris Lattnerb5557a72009-09-01 17:09:55 +00001468 bool MadeChange = false;
Sean Silva6347df02016-06-14 02:44:55 +00001469 MD = MD_;
1470 TLI = TLI_;
Benjamin Kramer1afc1de2016-06-17 20:41:14 +00001471 LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001472 LookupAssumptionCache = std::move(LookupAssumptionCache_);
Benjamin Kramer1afc1de2016-06-17 20:41:14 +00001473 LookupDomTree = std::move(LookupDomTree_);
Nadav Rotem465834c2012-07-24 10:51:42 +00001474
Chris Lattner23f61a02011-05-01 18:27:11 +00001475 // If we don't have at least memset and memcpy, there is little point in doing
1476 // anything here. These are required by a freestanding implementation, so if
1477 // even they are disabled, there is no point in trying hard.
David L. Jonesd21529f2017-01-23 23:16:46 +00001478 if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
Chris Lattner23f61a02011-05-01 18:27:11 +00001479 return false;
Nadav Rotem465834c2012-07-24 10:51:42 +00001480
Eugene Zelenko34c23272017-01-18 00:57:48 +00001481 while (true) {
Chris Lattnerb5557a72009-09-01 17:09:55 +00001482 if (!iterateOnFunction(F))
1483 break;
1484 MadeChange = true;
1485 }
Nadav Rotem465834c2012-07-24 10:51:42 +00001486
Craig Topperf40110f2014-04-25 05:29:35 +00001487 MD = nullptr;
Chris Lattnerb5557a72009-09-01 17:09:55 +00001488 return MadeChange;
1489}
Sean Silva6347df02016-06-14 02:44:55 +00001490
1491/// This is the main transformation entry point for a function.
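/// (For standalone testing this pass is typically driven through 'opt'; the
/// registered pass name is assumed to be "memcpyopt", e.g.
/// "opt -memcpyopt -S in.ll" with the legacy pass manager or
/// "opt -passes=memcpyopt -S in.ll" with the new one.)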
1492bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
1493 if (skipFunction(F))
1494 return false;
1495
1496 auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
1497 auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1498
1499 auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
1500 return getAnalysis<AAResultsWrapperPass>().getAAResults();
1501 };
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001502 auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
1503 return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1504 };
Sean Silva6347df02016-06-14 02:44:55 +00001505 auto LookupDomTree = [this]() -> DominatorTree & {
1506 return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1507 };
1508
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001509 return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
1510 LookupDomTree);
Sean Silva6347df02016-06-14 02:44:55 +00001511}