//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with.  This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
  // Look through constant globals.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    if (GV->mayBeOverridden() || !GV->isConstant() || !GV->hasInitializer())
      return 0;
    V = GV->getInitializer();
  }

  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
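      //
      // For example (illustrative): i32 0xA0A0A0A0 splits into halves 0xA0A0
      // and 0xA0A0, which split into 0xA0 and 0xA0, so the loop below ends
      // with the i8 value 0xA0; i16 0x1234 is rejected at the first split.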
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2 = Val2.trunc(Val.getBitWidth()/2);
        Val = Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(V->getContext(), Val);
    }
  }

  // A ConstantArray is splatable if all its members are equal and also
  // splatable.
  if (ConstantArray *CA = dyn_cast<ConstantArray>(V)) {
    if (CA->getNumOperands() == 0)
      return 0;

    Value *Val = isBytewiseValue(CA->getOperand(0));
    if (!Val)
      return 0;

    for (unsigned I = 1, E = CA->getNumOperands(); I != E; ++I)
      if (CA->getOperand(I-1) != CA->getOperand(I))
        return 0;

    return Val;
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}

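/// GetOffsetFromIndex - Compute the constant byte offset implied by GEP's
/// operands starting at operand Idx; if a non-constant index is encountered,
/// set VariableIdxFound and give up.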
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr1 is provably equal to Ptr2 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40].  In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that they handle some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi-open range that describes the span that this range
  // covers.  The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found more than 8 stores to merge or 64 bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();
  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
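  //
  // For example (illustrative), on a target with 4-byte pointers, four i8
  // stores give Bytes == 4, NumPointerStores == 1, NumByteStores == 0, and
  // 4 > 1 holds, so the range is lowered to a memset; three i8 stores give
  // 3 > 0+3 == false, so they are left alone.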
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};

} // end anon namespace


/// addStore - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
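///
/// For example (illustrative): with existing ranges [0,2) and [4,6), adding a
/// two-byte store at offset 2 extends the first range to [0,4) and, because
/// its end now touches [4,6), the merge loop below folds both into [0,6).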
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list.  We keep the ranges sorted for
  // simplicity here.  This is a linear search of a linked list, which is ugly,
  // however the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start     = Start;
    R.End       = End;
    R.StartPtr  = SI->getPointerOperand();
    R.Alignment = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.
  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
    I->Alignment = SI->getAlignment();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out
  // to End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
class MemCpyOpt : public FunctionPass {
  MemoryDependenceAnalysis *MD;
  bool runOnFunction(Function &F);
public:
  static char ID; // Pass identification, replacement for typeid
  MemCpyOpt() : FunctionPass(ID) {
    initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
    MD = 0;
  }

private:
  // This transformation requires dominator info.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTree>();
    AU.addRequired<MemoryDependenceAnalysis>();
    AU.addRequired<AliasAnalysis>();
    AU.addPreserved<AliasAnalysis>();
    AU.addPreserved<MemoryDependenceAnalysis>();
  }

  // Helper functions
  bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
  bool processMemCpy(MemCpyInst *M);
  bool processMemMove(MemMoveInst *M);
  bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                            uint64_t cpyLen, CallInst *C);
  bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                     uint64_t MSize);
  bool processByValArgument(CallSite CS, unsigned ArgNo);
  bool iterateOnFunction(Function &F);
};

char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// processStore - When scanning forward over instructions, we look for some
/// other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
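  //
  // For example (illustrative IR sketch):
  //   call void @f(i32* %src)   ; f writes its result through %src
  //   %v = load i32* %src
  //   store i32 %v, i32* %dest
  // can, when the checks below pass, forward the call to write into %dest
  // directly and drop the load/store pair.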
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (!LI->isVolatile() && LI->hasOneUse()) {
      MemDepResult dep = MD->getDependency(LI);
      CallInst *C = 0;
      if (dep.isClobber() && !isa<MemCpyInst>(dep.getInst()))
        C = dyn_cast<CallInst>(dep.getInst());

      if (C) {
        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  LLVMContext &Context = SI->getContext();

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
  if (!ByteVal)
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  Module *M = SI->getParent()->getParent()->getParent();

  // Okay, so we now have a single store that can be splatable.  Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(*TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out.  We don't even
      // allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.

      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addStore(0, SI);


  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.  We put
    // the memset right before the first instruction that isn't part of this
    // memset block.  This ensures that the memset is dominated by any addressing
    // instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;

    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      const Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    // Cast the start ptr to be i8* as memset requires.
    const PointerType *StartPTy = cast<PointerType>(StartPtr->getType());
    const PointerType *i8Ptr = Type::getInt8PtrTy(Context,
                                                  StartPTy->getAddressSpace());
    if (StartPTy != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,  // Start, value
      // size
      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
      // align
      ConstantInt::get(Type::getInt32Ty(Context), Alignment),
      // volatile
      ConstantInt::getFalse(Context),
    };
    const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };

    Function *MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);

    Value *C = CallInst::Create(MemSetF, Ops, Ops+5, "", InsertPt);
    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *C << '\n'); (void)C;

    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    ++NumMemSetInfer;
    MadeChange = true;
  }

  return MadeChange;
}


/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpyDest, srcSize) !=
        AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
///
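/// For example (illustrative):
///   memcpy(B <- A)    ; MDep
///   memcpy(C <- B)    ; M, can become memcpy(C <- A)
/// which leaves MDep exposed to later dead-store elimination.
///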
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep.  This handles cases
  // like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1) return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  Intrinsic::ID ResultFn = Intrinsic::memcpy;
  if (AA.alias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)) !=
      AliasAnalysis::NoAlias)
    ResultFn = Intrinsic::memmove;

  // If all checks passed, then we can transform M.
  const Type *ArgTys[3] = {
    M->getRawDest()->getType(),
    MDep->getRawSource()->getType(),
    M->getLength()->getType()
  };
  Function *MemCpyFun =
    Intrinsic::getDeclaration(MDep->getParent()->getParent()->getParent(),
                              ResultFn, ArgTys, 3);

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());
  Value *Args[5] = {
    M->getRawDest(),
    MDep->getRawSource(),
    M->getLength(),
    ConstantInt::get(Type::getInt32Ty(MemCpyFun->getContext()), Align),
    M->getVolatileCst()
  };
  CallInst::Create(MemCpyFun, Args, Args+5, "", M);

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}


/// processMemCpy - perform simplification of memcpys.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
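///
/// For example (illustrative), a memcpy whose source is a constant global
/// filled with a single repeated byte is rewritten below into a memset of
/// that byte.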
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize statically-sized memcpys that are non-volatile.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;
Owen Anderson | 6549121 | 2010-10-15 22:52:12 +0000 | [diff] [blame] | 787 | |
Chris Lattner | 8fdca6a | 2010-12-09 07:45:45 +0000 | [diff] [blame] | 788 | // If the source and destination of the memcpy are the same, then zap it. |
| 789 | if (M->getSource() == M->getDest()) { |
| 790 | MD->removeInstruction(M); |
| 791 | M->eraseFromParent(); |
| 792 | return false; |
| 793 | } |
Benjamin Kramer | a112087 | 2010-12-24 21:17:12 +0000 | [diff] [blame^] | 794 | |
| 795 | // If copying from a constant, try to turn the memcpy into a memset. |
| 796 | if (Value *ByteVal = isBytewiseValue(M->getSource())) { |
| 797 | Value *Ops[] = { |
| 798 | M->getRawDest(), ByteVal, // Start, value |
| 799 | CopySize, // Size |
| 800 | M->getAlignmentCst(), // Alignment |
| 801 | ConstantInt::getFalse(M->getContext()), // volatile |
| 802 | }; |
| 803 | const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() }; |
| 804 | Module *Mod = M->getParent()->getParent()->getParent(); |
| 805 | Function *MemSetF = Intrinsic::getDeclaration(Mod, Intrinsic::memset, Tys, 2); |
| 806 | CallInst::Create(MemSetF, Ops, Ops+5, "", M); |
| 807 | M->eraseFromParent(); |
| 808 | ++NumCpyToSet; |
| 809 | return true; |
| 810 | } |
| 811 | |
  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult DepInfo = MD->getDependency(M);
  if (!DepInfo.isClobber())
    return false;

  if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()))
    return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());

  if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
    if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                             CopySize->getZExtValue(), C)) {
      M->eraseFromParent();
      return true;
    }
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // See if the pointers alias.
  if (AA.alias(AA.getLocationForDest(M),
               AA.getLocationForSource(M)) !=
      AliasAnalysis::NoAlias)
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            M->getRawSource()->getType(),
                            M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys, 3));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  const Type *ByValTy =
    cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can pass the source of the memcpy as the byval
  // argument instead of the copy.
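  //
  // For example (illustrative):
  //   memcpy(%tmp <- %src)
  //   call void @f(i32* byval %tmp)  ; can become call void @f(i32* byval %src)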
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be greater than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If it is greater than the memcpy, then we
  // can't do the substitution.  If the call doesn't specify the alignment, then
  // it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0 || MDep->getAlignment() < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I)) {
        RepeatInstruction = processMemCpy(M);
      } else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
        RepeatInstruction = processMemMove(M);
      } else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.paramHasAttr(i+1, Attribute::ByVal))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}