Chris Lattner | e6bb649 | 2010-12-26 19:39:38 +0000 | [diff] [blame] | 1 | //===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This pass implements an idiom recognizer that transforms simple loops into a |
| 11 | // non-loop form. In cases that this kicks in, it can be a significant |
| 12 | // performance win. |
| 13 | // |
| 14 | //===----------------------------------------------------------------------===// |
Chris Lattner | bdce572 | 2011-01-02 18:32:09 +0000 | [diff] [blame] | 15 | // |
| 16 | // TODO List: |
| 17 | // |
| 18 | // Future loop memory idioms to recognize: |
| 19 | // memcmp, memmove, strlen, etc. |
| 20 | // Future floating point idioms to recognize in -ffast-math mode: |
| 21 | // fpowi |
| 22 | // Future integer operation idioms to recognize: |
| 23 | // ctpop, ctlz, cttz |
| 24 | // |
| 25 | // Beware that isel's default lowering for ctpop is highly inefficient for |
| 26 | // i64 and larger types when i64 is legal and the value has few bits set. It |
| 27 | // would be good to enhance isel to emit a loop for ctpop in this case. |
| 28 | // |
| 29 | // We should enhance the memset/memcpy recognition to handle multiple stores in |
| 30 | // the loop. This would handle things like: |
| 31 | // void foo(_Complex float *P) |
| 32 | // for (i) { __real__(*P) = 0; __imag__(*P) = 0; } |
Chris Lattner | 91139cc | 2011-01-02 23:19:45 +0000 | [diff] [blame] | 33 | // |
Chris Lattner | d957c71 | 2011-01-03 01:10:08 +0000 | [diff] [blame] | 34 | // This could recognize common matrix multiplies and dot product idioms and |
Chris Lattner | 91139cc | 2011-01-02 23:19:45 +0000 | [diff] [blame] | 35 | // replace them with calls to BLAS (if linked in??). |
| 36 | // |
Chris Lattner | bdce572 | 2011-01-02 18:32:09 +0000 | [diff] [blame] | 37 | //===----------------------------------------------------------------------===// |
Chris Lattner | e6bb649 | 2010-12-26 19:39:38 +0000 | [diff] [blame] | 38 | |
| 39 | #define DEBUG_TYPE "loop-idiom" |
| 40 | #include "llvm/Transforms/Scalar.h" |
Chris Lattner | e41d3c0 | 2011-01-04 07:46:33 +0000 | [diff] [blame] | 41 | #include "llvm/IntrinsicInst.h" |
Chris Lattner | 2e12f1a | 2010-12-27 18:39:08 +0000 | [diff] [blame] | 42 | #include "llvm/Analysis/AliasAnalysis.h" |
Chris Lattner | e6bb649 | 2010-12-26 19:39:38 +0000 | [diff] [blame] | 43 | #include "llvm/Analysis/LoopPass.h" |
| 44 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" |
Chris Lattner | a92ff91 | 2010-12-26 23:42:51 +0000 | [diff] [blame] | 45 | #include "llvm/Analysis/ScalarEvolutionExpander.h" |
Chris Lattner | 22920b5 | 2010-12-26 20:45:45 +0000 | [diff] [blame] | 46 | #include "llvm/Analysis/ValueTracking.h" |
| 47 | #include "llvm/Target/TargetData.h" |
Chris Lattner | c19175c | 2011-02-18 22:22:15 +0000 | [diff] [blame^] | 48 | #include "llvm/Target/TargetLibraryInfo.h" |
Chris Lattner | 9f39188 | 2010-12-27 00:03:23 +0000 | [diff] [blame] | 49 | #include "llvm/Transforms/Utils/Local.h" |
Chris Lattner | e6bb649 | 2010-12-26 19:39:38 +0000 | [diff] [blame] | 50 | #include "llvm/Support/Debug.h" |
Chris Lattner | a92ff91 | 2010-12-26 23:42:51 +0000 | [diff] [blame] | 51 | #include "llvm/Support/IRBuilder.h" |
Chris Lattner | e6bb649 | 2010-12-26 19:39:38 +0000 | [diff] [blame] | 52 | #include "llvm/Support/raw_ostream.h" |
Chris Lattner | 4ce31fb | 2011-01-02 07:36:44 +0000 | [diff] [blame] | 53 | #include "llvm/ADT/Statistic.h" |
using namespace llvm;

// Pass statistics, reported with -stats.
STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
Chris Lattner | e6bb649 | 2010-12-26 19:39:38 +0000 | [diff] [blame] | 58 | |
namespace {
  /// LoopIdiomRecognize - Recognize simple loop idioms (currently strided
  /// stores / load+store pairs) and replace them with memset/memcpy calls
  /// emitted in the loop preheader.
  class LoopIdiomRecognize : public LoopPass {
    Loop *CurLoop;          // The loop currently being processed.
    const TargetData *TD;   // Needed for type/store sizes; pass bails if absent.
    DominatorTree *DT;      // Used to prove a block executes unconditionally.
    ScalarEvolution *SE;    // Source of trip counts and strided-access AddRecs.
    TargetLibraryInfo *TLI; // Tells us whether memset/memcpy may be emitted.
  public:
    static char ID;
    explicit LoopIdiomRecognize() : LoopPass(ID) {
      initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM);
    bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                        SmallVectorImpl<BasicBlock*> &ExitBlocks);

    bool processLoopStore(StoreInst *SI, const SCEV *BECount);
    bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

    bool processLoopStoreOfSplatValue(Value *DestPtr, unsigned StoreSize,
                                      unsigned StoreAlignment,
                                      Value *SplatValue, Instruction *TheStore,
                                      const SCEVAddRecExpr *Ev,
                                      const SCEV *BECount);
    bool processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
                                    const SCEVAddRecExpr *StoreEv,
                                    const SCEVAddRecExpr *LoadEv,
                                    const SCEV *BECount);

    /// This transformation requires natural loop information & requires that
    /// loop preheaders be inserted into the CFG.
    ///
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<LoopInfo>();
      AU.addPreserved<LoopInfo>();
      AU.addRequiredID(LoopSimplifyID);
      AU.addPreservedID(LoopSimplifyID);
      AU.addRequiredID(LCSSAID);
      AU.addPreservedID(LCSSAID);
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
      AU.addPreserved<DominatorTree>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<TargetLibraryInfo>();
    }
  };
}
| 109 | |
char LoopIdiomRecognize::ID = 0;

// Register the pass and its analysis dependencies with the PassRegistry.
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
                    false, false)

// Public factory used by the pass-manager builder.
Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }
| 124 | |
Chris Lattner | 9f39188 | 2010-12-27 00:03:23 +0000 | [diff] [blame] | 125 | /// DeleteDeadInstruction - Delete this instruction. Before we do, go through |
| 126 | /// and zero out all the operands of this instruction. If any of them become |
| 127 | /// dead, delete them and the computation tree that feeds them. |
| 128 | /// |
| 129 | static void DeleteDeadInstruction(Instruction *I, ScalarEvolution &SE) { |
| 130 | SmallVector<Instruction*, 32> NowDeadInsts; |
| 131 | |
| 132 | NowDeadInsts.push_back(I); |
| 133 | |
| 134 | // Before we touch this instruction, remove it from SE! |
| 135 | do { |
| 136 | Instruction *DeadInst = NowDeadInsts.pop_back_val(); |
| 137 | |
| 138 | // This instruction is dead, zap it, in stages. Start by removing it from |
| 139 | // SCEV. |
| 140 | SE.forgetValue(DeadInst); |
| 141 | |
| 142 | for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) { |
| 143 | Value *Op = DeadInst->getOperand(op); |
| 144 | DeadInst->setOperand(op, 0); |
| 145 | |
| 146 | // If this operand just became dead, add it to the NowDeadInsts list. |
| 147 | if (!Op->use_empty()) continue; |
| 148 | |
| 149 | if (Instruction *OpI = dyn_cast<Instruction>(Op)) |
| 150 | if (isInstructionTriviallyDead(OpI)) |
| 151 | NowDeadInsts.push_back(OpI); |
| 152 | } |
| 153 | |
| 154 | DeadInst->eraseFromParent(); |
| 155 | |
| 156 | } while (!NowDeadInsts.empty()); |
| 157 | } |
| 158 | |
/// runOnLoop - Pass entry point.  Bails out unless the loop has an analyzable,
/// loop-invariant trip count, then scans every block of the loop (not in a
/// subloop) for promotable stores/memsets.
bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
  CurLoop = L;

  // The trip count of the loop must be analyzable.
  SE = &getAnalysis<ScalarEvolution>();
  if (!SE->hasLoopInvariantBackedgeTakenCount(L))
    return false;
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BECount)) return false;

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.  (A backedge-taken count of 0 means the body runs
  // exactly once.)
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getValue()->getValue() == 0)
      return false;

  // We require target data for now: without it we cannot compute store sizes.
  TD = getAnalysisIfAvailable<TargetData>();
  if (TD == 0) return false;

  DT = &getAnalysis<DominatorTree>();
  LoopInfo &LI = getAnalysis<LoopInfo>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  DEBUG(dbgs() << "loop-idiom Scanning: F["
               << L->getHeader()->getParent()->getName()
               << "] Loop %" << L->getHeader()->getName() << "\n");

  bool MadeChange = false;
  // Scan all the blocks in the loop that are not in subloops.
  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI) {
    // Ignore blocks in subloops; the inner loop pass invocation handles those.
    if (LI.getLoopFor(*BI) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(*BI, BECount, ExitBlocks);
  }
  return MadeChange;
}
| 202 | |
/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count.  This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                               SmallVectorImpl<BasicBlock*> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop.  For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop.  Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
    Instruction *Inst = I++;
    // Look for store instructions, which may be optimized to memset/memcpy.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // I was advanced above, so InstPtr tracks the *next* instruction; if the
      // transform deletes it (it may feed the store), the WeakVH nulls out.
      WeakVH InstPtr(I);
      if (!processLoopStore(SI, BECount)) continue;
      MadeChange = true;

      // If processing the store invalidated our iterator, start over from the
      // top of the block.
      if (InstPtr == 0)
        I = BB->begin();
      continue;
    }

    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      // Same next-instruction tracking as the store case above.
      WeakVH InstPtr(I);
      if (!processLoopMemSet(MSI, BECount)) continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (InstPtr == 0)
        I = BB->begin();
      continue;
    }
  }

  return MadeChange;
}
| 247 | |
Chris Lattner | 62c50fd | 2011-01-02 19:01:03 +0000 | [diff] [blame] | 248 | |
/// processLoopStore - See if this store can be promoted to a memset or memcpy.
bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
  if (SI->isVolatile()) return false;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned, and stores
  // whose size is not a whole number of bytes.
  uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return false;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store.  If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
    dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (StoreEv == 0 || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return false;

  // Check to see if the stride matches the size of the store.  If so, then we
  // know that every byte is touched in the loop.
  unsigned StoreSize = (unsigned)SizeInBits >> 3;
  const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));

  // TODO: Could also handle negative stride here someday, that will require the
  // validity check in mayLoopAccessLocation to be updated though.
  if (Stride == 0 || StoreSize != Stride->getValue()->getValue())
    return false;

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored.  A store of i32 0x01020304 can never be turned into a memset.
  if (Value *SplatValue = isBytewiseValue(StoredVal))
    if (processLoopStoreOfSplatValue(StorePtr, StoreSize, SI->getAlignment(),
                                     SplatValue, SI, StoreEv, BECount))
      return true;

  // If the stored value is a strided load in the same loop with the same stride
  // then this may be transformable into a memcpy.  This kicks in for stuff like
  //   for (i) A[i] = B[i];
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    const SCEVAddRecExpr *LoadEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getOperand(0)));
    if (LoadEv && LoadEv->getLoop() == CurLoop && LoadEv->isAffine() &&
        StoreEv->getOperand(1) == LoadEv->getOperand(1) && !LI->isVolatile())
      if (processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, LoadEv, BECount))
        return true;
  }
  //errs() << "UNHANDLED strided store: " << *StoreEv << " - " << *SI << "\n";

  return false;
}
| 302 | |
/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::
processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength())) return false;

  // If we're not allowed to hack on memset, we fail.
  if (!TLI->has(LibFunc::memset))
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store.  If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (Ev == 0 || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset.  If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *Stride = dyn_cast<SCEVConstant>(Ev->getOperand(1));

  // TODO: Could also handle negative stride here someday, that will require the
  // validity check in mayLoopAccessLocation to be updated though.
  // NOTE(review): this is a Value* identity comparison — it only matches when
  // the length and stride are the same uniqued constant (same value AND same
  // integer type); a type mismatch conservatively bails out.  Confirm this is
  // intended rather than a value-equality check.
  if (Stride == 0 || MSI->getLength() != Stride->getValue())
    return false;

  return processLoopStoreOfSplatValue(Pointer, (unsigned)SizeInBytes,
                                      MSI->getAlignment(), MSI->getValue(),
                                      MSI, Ev, BECount);
}
| 340 | |
| 341 | |
Chris Lattner | 63f9c3c | 2011-01-02 21:14:18 +0000 | [diff] [blame] | 342 | /// mayLoopAccessLocation - Return true if the specified loop might access the |
| 343 | /// specified pointer location, which is a loop-strided access. The 'Access' |
| 344 | /// argument specifies what the verboten forms of access are (read or write). |
| 345 | static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access, |
| 346 | Loop *L, const SCEV *BECount, |
Chris Lattner | e2c4392 | 2011-01-02 03:37:56 +0000 | [diff] [blame] | 347 | unsigned StoreSize, AliasAnalysis &AA, |
Chris Lattner | e41d3c0 | 2011-01-04 07:46:33 +0000 | [diff] [blame] | 348 | Instruction *IgnoredStore) { |
Chris Lattner | 30980b6 | 2011-01-01 19:39:01 +0000 | [diff] [blame] | 349 | // Get the location that may be stored across the loop. Since the access is |
| 350 | // strided positively through memory, we say that the modified location starts |
| 351 | // at the pointer and has infinite size. |
Chris Lattner | a64cbf0 | 2011-01-01 19:54:22 +0000 | [diff] [blame] | 352 | uint64_t AccessSize = AliasAnalysis::UnknownSize; |
| 353 | |
| 354 | // If the loop iterates a fixed number of times, we can refine the access size |
| 355 | // to be exactly the size of the memset, which is (BECount+1)*StoreSize |
| 356 | if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount)) |
| 357 | AccessSize = (BECst->getValue()->getZExtValue()+1)*StoreSize; |
| 358 | |
| 359 | // TODO: For this to be really effective, we have to dive into the pointer |
| 360 | // operand in the store. Store to &A[i] of 100 will always return may alias |
| 361 | // with store of &A[100], we need to StoreLoc to be "A" with size of 100, |
| 362 | // which will then no-alias a store to &A[100]. |
Chris Lattner | e2c4392 | 2011-01-02 03:37:56 +0000 | [diff] [blame] | 363 | AliasAnalysis::Location StoreLoc(Ptr, AccessSize); |
Chris Lattner | 30980b6 | 2011-01-01 19:39:01 +0000 | [diff] [blame] | 364 | |
| 365 | for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E; |
| 366 | ++BI) |
| 367 | for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I) |
Chris Lattner | e2c4392 | 2011-01-02 03:37:56 +0000 | [diff] [blame] | 368 | if (&*I != IgnoredStore && |
Chris Lattner | 63f9c3c | 2011-01-02 21:14:18 +0000 | [diff] [blame] | 369 | (AA.getModRefInfo(I, StoreLoc) & Access)) |
Chris Lattner | 30980b6 | 2011-01-01 19:39:01 +0000 | [diff] [blame] | 370 | return true; |
| 371 | |
| 372 | return false; |
| 373 | } |
| 374 | |
/// processLoopStoreOfSplatValue - We see a strided store of a memsetable value.
/// If we can transform this into a memset in the loop preheader, do so.
bool LoopIdiomRecognize::
processLoopStoreOfSplatValue(Value *DestPtr, unsigned StoreSize,
                             unsigned StoreAlignment, Value *SplatValue,
                             Instruction *TheStore,
                             const SCEVAddRecExpr *Ev, const SCEV *BECount) {
  // If we're not allowed to form memset, we fail.
  if (!TLI->has(LibFunc::memset))
    return false;

  // Verify that the stored value is loop invariant.  If not, we can't promote
  // the memset.
  if (!CurLoop->isLoopInvariant(SplatValue))
    return false;

  // Okay, we have a strided store "p[i]" of a splattable value.  We can turn
  // this into a memset in the loop preheader now if we want.  However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location.  Check for an alias.
  if (mayLoopAccessLocation(DestPtr, AliasAnalysis::ModRef,
                            CurLoop, BECount,
                            StoreSize, getAnalysis<AliasAnalysis>(), TheStore))
    return false;

  // Okay, everything looks good, insert the memset.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  IRBuilder<> Builder(Preheader->getTerminator());

  // The trip count of the loop and the base pointer of the addrec SCEV is
  // guaranteed to be loop invariant, which means that it should dominate the
  // header.  Just insert code for it in the preheader.
  SCEVExpander Expander(*SE);

  unsigned AddrSpace = cast<PointerType>(DestPtr->getType())->getAddressSpace();
  Value *BasePtr =
    Expander.expandCodeFor(Ev->getStart(), Builder.getInt8PtrTy(AddrSpace),
                           Preheader->getTerminator());

  // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
  // pointer size if it isn't already.
  const Type *IntPtr = TD->getIntPtrType(SplatValue->getContext());
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
                                         true /*no unsigned overflow*/);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               true /*no unsigned overflow*/);

  Value *NumBytes =
    Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  Value *NewCall =
    Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);

  DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
               << "    from store to: " << *Ev << " at: " << *TheStore << "\n");
  (void)NewCall;  // Only used by the DEBUG output; silence unused warnings.

  // Okay, the memset has been formed.  Zap the original store and anything that
  // feeds into it.
  DeleteDeadInstruction(TheStore, *SE);
  ++NumMemSet;
  return true;
}
| 443 | |
/// processLoopStoreOfLoopLoad - We see a strided store whose value is a
/// same-strided load.
bool LoopIdiomRecognize::
processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
                           const SCEVAddRecExpr *StoreEv,
                           const SCEVAddRecExpr *LoadEv,
                           const SCEV *BECount) {
  // If we're not allowed to form memcpy, we fail.
  if (!TLI->has(LibFunc::memcpy))
    return false;

  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());

  // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
  // this into a memcpy in the loop preheader now if we want.  However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the stored location (including the load feeding the stores).
  // Check for an alias.
  if (mayLoopAccessLocation(SI->getPointerOperand(), AliasAnalysis::ModRef,
                            CurLoop, BECount, StoreSize,
                            getAnalysis<AliasAnalysis>(), SI))
    return false;

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  if (mayLoopAccessLocation(LI->getPointerOperand(), AliasAnalysis::Mod,
                            CurLoop, BECount, StoreSize,
                            getAnalysis<AliasAnalysis>(), SI))
    return false;

  // Okay, everything looks good, insert the memcpy.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  IRBuilder<> Builder(Preheader->getTerminator());

  // The trip count of the loop and the base pointer of the addrec SCEV is
  // guaranteed to be loop invariant, which means that it should dominate the
  // header.  Just insert code for it in the preheader.
  SCEVExpander Expander(*SE);

  Value *LoadBasePtr =
    Expander.expandCodeFor(LoadEv->getStart(),
                           Builder.getInt8PtrTy(LI->getPointerAddressSpace()),
                           Preheader->getTerminator());
  Value *StoreBasePtr =
    Expander.expandCodeFor(StoreEv->getStart(),
                           Builder.getInt8PtrTy(SI->getPointerAddressSpace()),
                           Preheader->getTerminator());

  // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
  // pointer size if it isn't already.
  const Type *IntPtr = TD->getIntPtrType(SI->getContext());
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
                                         true /*no unsigned overflow*/);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               true /*no unsigned overflow*/);

  Value *NumBytes =
    Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  // Use the weaker of the two access alignments for the call.
  Value *NewCall =
    Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
                         std::min(SI->getAlignment(), LI->getAlignment()));

  DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
               << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
               << "    from store ptr=" << *StoreEv << " at: " << *SI << "\n");
  (void)NewCall;  // Only used by the DEBUG output; silence unused warnings.

  // Okay, the memcpy has been formed.  Zap the original store and anything that
  // feeds into it.  (The load is deleted too if the store was its last use.)
  DeleteDeadInstruction(SI, *SE);
  ++NumMemCpy;
  return true;
}