//===- UnrollLoopPeel.cpp - Loop peeling utilities ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements some loop unrolling utilities for peeling loops
// with dynamically inferred (from PGO) trip counts. See LoopUnroll.cpp for
// unrolling loops with compile-time constant trip counts.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "loop-unroll"

STATISTIC(NumPeeled, "Number of loops peeled");

static cl::opt<unsigned> UnrollPeelMaxCount(
    "unroll-peel-max-count", cl::init(7), cl::Hidden,
    cl::desc("Max average trip count which will cause loop peeling."));

static cl::opt<unsigned> UnrollForcePeelCount(
    "unroll-force-peel-count", cl::init(0), cl::Hidden,
    cl::desc("Force a peel count regardless of profiling information."));

// Designates that a Phi is estimated to become invariant after an "infinite"
// number of loop iterations (i.e., it may only become invariant if the loop is
// fully unrolled).
static const unsigned InfiniteIterationsToInvariance =
    std::numeric_limits<unsigned>::max();

// Check whether we are capable of peeling this loop.
static bool canPeel(Loop *L) {
  // Make sure the loop is in simplified form
  if (!L->isLoopSimplifyForm())
    return false;

  // Only peel loops that contain a single exit
  if (!L->getExitingBlock() || !L->getUniqueExitBlock())
    return false;

  // Don't try to peel loops where the latch is not the exiting block.
  // This can be an indication of two different things:
  // 1) The loop is not rotated.
  // 2) The loop contains irreducible control flow that involves the latch.
  if (L->getLoopLatch() != L->getExitingBlock())
    return false;

  return true;
}

// This function calculates the number of iterations after which the given Phi
// becomes an invariant. The pre-calculated values are memoized in the map. The
// function (shortcut is I) is calculated according to the following definition:
// Given %x = phi <Inputs from above the loop>, ..., [%y, %back.edge].
//   If %y is a loop invariant, then I(%x) = 1.
//   If %y is a Phi from the loop header, I(%x) = I(%y) + 1.
//   Otherwise, I(%x) is infinite.
// TODO: Actually if %y is an expression that depends only on Phi %z and some
//       loop invariants, we can estimate I(%x) = I(%z) + 1. The example
//       looks like:
//       %x = phi(0, %a),  <-- becomes invariant starting from 3rd iteration.
//       %y = phi(0, 5),
//       %a = %y + 1.
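//
// For example, for the (hypothetical) header Phis below, I(%inv1) = 1 and
// I(%inv2) = I(%inv1) + 1 = 2, while I(%iv) is infinite because %iv.next is
// neither loop-invariant nor a header Phi:
//   %iv   = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
//   %inv1 = phi i32 [ %a, %preheader ], [ %b, %latch ]    ; %b is invariant
//   %inv2 = phi i32 [ %c, %preheader ], [ %inv1, %latch ]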
static unsigned calculateIterationsToInvariance(
    PHINode *Phi, Loop *L, BasicBlock *BackEdge,
    SmallDenseMap<PHINode *, unsigned> &IterationsToInvariance) {
  assert(Phi->getParent() == L->getHeader() &&
         "Non-loop Phi should not be checked for turning into invariant.");
  assert(BackEdge == L->getLoopLatch() && "Wrong latch?");
  // If we already know the answer, take it from the map.
  auto I = IterationsToInvariance.find(Phi);
  if (I != IterationsToInvariance.end())
    return I->second;

  // Otherwise we need to analyze the input from the back edge.
  Value *Input = Phi->getIncomingValueForBlock(BackEdge);
  // Place infinity into the map to avoid infinite recursion for cycled Phis.
  // Such cycles can never stop on an invariant.
  IterationsToInvariance[Phi] = InfiniteIterationsToInvariance;
  unsigned ToInvariance = InfiniteIterationsToInvariance;

  if (L->isLoopInvariant(Input))
    ToInvariance = 1u;
  else if (PHINode *IncPhi = dyn_cast<PHINode>(Input)) {
    // Only consider Phis in the header block.
    if (IncPhi->getParent() != L->getHeader())
      return InfiniteIterationsToInvariance;
    // If the input becomes an invariant after X iterations, then our Phi
    // becomes an invariant after X + 1 iterations.
    unsigned InputToInvariance = calculateIterationsToInvariance(
        IncPhi, L, BackEdge, IterationsToInvariance);
    if (InputToInvariance != InfiniteIterationsToInvariance)
      ToInvariance = InputToInvariance + 1u;
  }

  // If we found that this Phi lies in an invariant chain, update the map.
  if (ToInvariance != InfiniteIterationsToInvariance)
    IterationsToInvariance[Phi] = ToInvariance;
  return ToInvariance;
}

// Compute the number of iterations we want to peel off and store it in
// UP.PeelCount.
void llvm::computePeelCount(Loop *L, unsigned LoopSize,
                            TargetTransformInfo::UnrollingPreferences &UP,
                            unsigned &TripCount) {
  assert(LoopSize > 0 && "Zero loop size is not allowed!");
  UP.PeelCount = 0;
  if (!canPeel(L))
    return;

  // Only try to peel innermost loops.
  if (!L->empty())
    return;

  // Here we try to get rid of Phis which become invariants after 1, 2, ..., N
  // iterations of the loop. For this we compute the number of iterations after
  // which every Phi is guaranteed to become an invariant, and try to peel the
  // maximum number of iterations among these values, thus turning all those
  // Phis into invariants.
  // First, check that we can peel at least one iteration.
  if (2 * LoopSize <= UP.Threshold && UnrollPeelMaxCount > 0) {
    // Store the pre-calculated values here.
    SmallDenseMap<PHINode *, unsigned> IterationsToInvariance;
    // Now go through all Phis to calculate the number of iterations they need
    // to become invariants.
    unsigned DesiredPeelCount = 0;
    BasicBlock *BackEdge = L->getLoopLatch();
    assert(BackEdge && "Loop is not in simplified form?");
    for (auto BI = L->getHeader()->begin(); isa<PHINode>(&*BI); ++BI) {
      PHINode *Phi = cast<PHINode>(&*BI);
      unsigned ToInvariance = calculateIterationsToInvariance(
          Phi, L, BackEdge, IterationsToInvariance);
      if (ToInvariance != InfiniteIterationsToInvariance)
        DesiredPeelCount = std::max(DesiredPeelCount, ToInvariance);
    }
    if (DesiredPeelCount > 0) {
      // Respect the limits implied by the loop size and the max peel count.
      unsigned MaxPeelCount = UnrollPeelMaxCount;
      MaxPeelCount = std::min(MaxPeelCount, UP.Threshold / LoopSize - 1);
      DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount);
      // The size check above guarantees MaxPeelCount >= 1, so the clamped
      // count stays positive.
      assert(DesiredPeelCount > 0 && "Wrong loop size estimation?");
      DEBUG(dbgs() << "Peel " << DesiredPeelCount << " iteration(s) to turn"
                   << " some Phis into invariants.\n");
      UP.PeelCount = DesiredPeelCount;
      return;
    }
  }

  // Bail if we know the statically calculated trip count.
  // In this case we prefer partial unrolling instead.
  if (TripCount)
    return;

  // If the user provided a peel count, use that.
  bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0;
  if (UserPeelCount) {
    DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount
                 << " iterations.\n");
    UP.PeelCount = UnrollForcePeelCount;
    return;
  }

  // If we don't know the trip count, but have reason to believe the average
  // trip count is low, peeling should be beneficial, since we will usually
  // hit the peeled section.
  // We only do this in the presence of profile information, since otherwise
  // our estimates of the trip count are not reliable enough.
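  // For example (illustrative numbers): with the default UnrollPeelMaxCount of
  // 7, a profile-estimated trip count of 3 leads to peeling 3 iterations as
  // long as 4 * LoopSize still fits under UP.Threshold, per the checks below.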
  if (UP.AllowPeeling && L->getHeader()->getParent()->getEntryCount()) {
    Optional<unsigned> PeelCount = getLoopEstimatedTripCount(L);
    if (!PeelCount)
      return;

    DEBUG(dbgs() << "Profile-based estimated trip count is " << *PeelCount
                 << "\n");

    if (*PeelCount) {
      if ((*PeelCount <= UnrollPeelMaxCount) &&
          (LoopSize * (*PeelCount + 1) <= UP.Threshold)) {
        DEBUG(dbgs() << "Peeling first " << *PeelCount << " iterations.\n");
        UP.PeelCount = *PeelCount;
        return;
      }
      DEBUG(dbgs() << "Requested peel count: " << *PeelCount << "\n");
      DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n");
      DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1) << "\n");
      DEBUG(dbgs() << "Max peel cost: " << UP.Threshold << "\n");
    }
  }
}

/// \brief Update the branch weights of the latch of a peeled-off loop
/// iteration.
/// This sets the branch weights for the latch of the recently peeled-off loop
/// iteration correctly.
/// Our goal is to make sure that:
/// a) The total weight of all the copies of the loop body is preserved.
/// b) The total weight of the loop exit is preserved.
/// c) The body weight is reasonably distributed between the peeled iterations.
///
/// \param Header The copy of the header block that belongs to the next
/// iteration.
/// \param LatchBR The copy of the latch branch that belongs to this iteration.
/// \param IterNumber The serial number of the iteration that was just
/// peeled off.
/// \param AvgIters The average number of iterations we expect the loop to
/// have.
/// \param[in,out] PeeledHeaderWeight The total number of dynamic loop
/// iterations that are unaccounted for. As an input, it represents the number
/// of times we expect to enter the header of the iteration currently being
/// peeled off. The output is the number of times we expect to enter the
/// header of the next iteration.
static void updateBranchWeights(BasicBlock *Header, BranchInst *LatchBR,
                                unsigned IterNumber, unsigned AvgIters,
                                uint64_t &PeeledHeaderWeight) {
  // FIXME: Pick a more realistic distribution.
  // Currently the proportion of weight we assign to the fall-through
  // side of the branch drops linearly with the iteration number, and we use
  // a 0.9 fudge factor to make the drop-off less sharp...
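  //
  // For example (illustrative numbers only): with AvgIters = 4 and an incoming
  // PeeledHeaderWeight of 1000, iteration 0 gets a fall-through weight of
  // 1000 * (4/4 * 0.9) = 900 and an exit weight of 100, so the next iteration
  // starts with PeeledHeaderWeight = 900.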
  if (PeeledHeaderWeight) {
    uint64_t FallThruWeight =
        PeeledHeaderWeight * ((float)(AvgIters - IterNumber) / AvgIters * 0.9);
    uint64_t ExitWeight = PeeledHeaderWeight - FallThruWeight;
    PeeledHeaderWeight -= ExitWeight;

    unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
    MDBuilder MDB(LatchBR->getContext());
    MDNode *WeightNode =
        HeaderIdx ? MDB.createBranchWeights(ExitWeight, FallThruWeight)
                  : MDB.createBranchWeights(FallThruWeight, ExitWeight);
    LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
  }
}

/// \brief Clones the body of the loop L, putting it between \p InsertTop and
/// \p InsertBot.
/// \param IterNumber The serial number of the iteration currently being
/// peeled off.
/// \param Exit The exit block of the original loop.
/// \param[out] NewBlocks A list of the blocks in the newly created clone.
/// \param[out] VMap The value map between the loop and the new clone.
/// \param LoopBlocks A helper for DFS-traversal of the loop.
/// \param LVMap A value-map that maps instructions from the original loop to
/// instructions in the last peeled-off iteration.
static void cloneLoopBlocks(Loop *L, unsigned IterNumber, BasicBlock *InsertTop,
                            BasicBlock *InsertBot, BasicBlock *Exit,
                            SmallVectorImpl<BasicBlock *> &NewBlocks,
                            LoopBlocksDFS &LoopBlocks, ValueToValueMapTy &VMap,
                            ValueToValueMapTy &LVMap, DominatorTree *DT,
                            LoopInfo *LI) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  BasicBlock *PreHeader = L->getLoopPreheader();

  Function *F = Header->getParent();
  LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO();
  Loop *ParentLoop = L->getParentLoop();

  // For each block in the original loop, create a new copy,
  // and update the value map with the newly created values.
  for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
    BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".peel", F);
    NewBlocks.push_back(NewBB);

    if (ParentLoop)
      ParentLoop->addBasicBlockToLoop(NewBB, *LI);

    VMap[*BB] = NewBB;

    // If dominator tree is available, insert nodes to represent cloned blocks.
    if (DT) {
      if (Header == *BB)
        DT->addNewBlock(NewBB, InsertTop);
      else {
        DomTreeNode *IDom = DT->getNode(*BB)->getIDom();
        // VMap must contain entry for IDom, as the iteration order is RPO.
        DT->addNewBlock(NewBB, cast<BasicBlock>(VMap[IDom->getBlock()]));
      }
    }
  }

  // Hook-up the control flow for the newly inserted blocks.
  // The new header is hooked up directly to the "top", which is either
  // the original loop preheader (for the first iteration) or the previous
  // iteration's exiting block (for every other iteration).
  InsertTop->getTerminator()->setSuccessor(0, cast<BasicBlock>(VMap[Header]));

  // Similarly, for the latch:
  // The original exiting edge is still hooked up to the loop exit.
  // The backedge now goes to the "bottom", which is either the loop's real
  // header (for the last peeled iteration) or the copied header of the next
  // iteration (for every other iteration).
  BasicBlock *NewLatch = cast<BasicBlock>(VMap[Latch]);
  BranchInst *LatchBR = cast<BranchInst>(NewLatch->getTerminator());
  unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
  LatchBR->setSuccessor(HeaderIdx, InsertBot);
  LatchBR->setSuccessor(1 - HeaderIdx, Exit);
  if (DT)
    DT->changeImmediateDominator(InsertBot, NewLatch);

  // The new copy of the loop body starts with a bunch of PHI nodes
  // that pick an incoming value from either the preheader, or the previous
  // loop iteration. Since this copy is no longer part of the loop, we
  // resolve this statically:
  // For the first iteration, we use the value from the preheader directly.
  // For any other iteration, we replace the phi with the value generated by
  // the immediately preceding clone of the loop body (which represents
  // the previous iteration).
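  // For instance, a (hypothetical) header Phi
  //   %v = phi [ %init, %preheader ], [ %v.next, %latch ]
  // is replaced by %init in the clone of iteration 0, and by iteration 0's
  // clone of %v.next in the clone of iteration 1.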
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    PHINode *NewPHI = cast<PHINode>(VMap[&*I]);
    if (IterNumber == 0) {
      VMap[&*I] = NewPHI->getIncomingValueForBlock(PreHeader);
    } else {
      Value *LatchVal = NewPHI->getIncomingValueForBlock(Latch);
      Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
      if (LatchInst && L->contains(LatchInst))
        VMap[&*I] = LVMap[LatchInst];
      else
        VMap[&*I] = LatchVal;
    }
    cast<BasicBlock>(VMap[Header])->getInstList().erase(NewPHI);
  }

  // Fix up the outgoing values - we need to add a value for the iteration
  // we've just created. Note that this must happen *after* the incoming
  // values are adjusted, since the value going out of the latch may also be
  // a value coming into the header.
  for (BasicBlock::iterator I = Exit->begin(); isa<PHINode>(I); ++I) {
    PHINode *PHI = cast<PHINode>(I);
    Value *LatchVal = PHI->getIncomingValueForBlock(Latch);
    Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
    if (LatchInst && L->contains(LatchInst))
      LatchVal = VMap[LatchVal];
    PHI->addIncoming(LatchVal, cast<BasicBlock>(VMap[Latch]));
  }

  // LVMap (the last-value map) is updated with the values from the current
  // iteration, which are used the next time this function is called.
  for (const auto &KV : VMap)
    LVMap[KV.first] = KV.second;
}

/// \brief Peel off the first \p PeelCount iterations of loop \p L.
///
/// Note that this does not peel them off as a single straight-line block.
/// Rather, each iteration is peeled off separately, and needs to check the
/// exit condition.
/// For loops that dynamically execute \p PeelCount iterations or fewer
/// this provides a benefit, since the peeled-off iterations, which account
/// for the bulk of dynamic execution, can be further simplified by scalar
/// optimizations.
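///
/// A rough usage sketch (the surrounding names are illustrative): a loop
/// unroller that has already decided on a peel count would call this before
/// performing any unrolling, e.g.
/// \code
///   if (UP.PeelCount)
///     peelLoop(L, UP.PeelCount, LI, SE, DT, AC, PreserveLCSSA);
/// \endcode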
bool llvm::peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI,
                    ScalarEvolution *SE, DominatorTree *DT,
                    AssumptionCache *AC, bool PreserveLCSSA) {
  if (!canPeel(L))
    return false;

  LoopBlocksDFS LoopBlocks(L);
  LoopBlocks.perform(LI);

  BasicBlock *Header = L->getHeader();
  BasicBlock *PreHeader = L->getLoopPreheader();
  BasicBlock *Latch = L->getLoopLatch();
  BasicBlock *Exit = L->getUniqueExitBlock();

  Function *F = Header->getParent();

  // Set up all the necessary basic blocks. It is convenient to split the
  // preheader into 3 parts - two blocks to anchor the peeled copy of the loop
  // body, and a new preheader for the "real" loop.

  // Peeling the first iteration transforms
  //
  // PreHeader:
  // ...
  // Header:
  //   LoopBody
  //   If (cond) goto Header
  // Exit:
  //
  // into
  //
  // InsertTop:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot:
  // NewPreHeader:
  // ...
  // Header:
  //   LoopBody
  //   If (cond) goto Header
  // Exit:
  //
  // Each following iteration will split the current bottom anchor in two,
  // and put the new copy of the loop body between these two blocks. That is,
  // after peeling another iteration from the example above, we'll split
  // InsertBot, and get:
  //
  // InsertTop:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot.next:
  // NewPreHeader:
  // ...
  // Header:
  //   LoopBody
  //   If (cond) goto Header
  // Exit:

  BasicBlock *InsertTop = SplitEdge(PreHeader, Header, DT, LI);
  BasicBlock *InsertBot =
      SplitBlock(InsertTop, InsertTop->getTerminator(), DT, LI);
  BasicBlock *NewPreHeader =
      SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI);

  InsertTop->setName(Header->getName() + ".peel.begin");
  InsertBot->setName(Header->getName() + ".peel.next");
  NewPreHeader->setName(PreHeader->getName() + ".peel.newph");

  ValueToValueMapTy LVMap;

  // If we have branch weight information, we'll want to update it for the
  // newly created branches.
  BranchInst *LatchBR =
      cast<BranchInst>(cast<BasicBlock>(Latch)->getTerminator());
  unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);

  uint64_t TrueWeight, FalseWeight;
  uint64_t ExitWeight = 0, CurHeaderWeight = 0;
  if (LatchBR->extractProfMetadata(TrueWeight, FalseWeight)) {
    ExitWeight = HeaderIdx ? TrueWeight : FalseWeight;
    // The # of times the loop body executes is the sum of the exit block
    // weight and the # of times the backedges are taken.
    CurHeaderWeight = TrueWeight + FalseWeight;
  }

  // For each peeled-off iteration, make a copy of the loop.
  for (unsigned Iter = 0; Iter < PeelCount; ++Iter) {
    SmallVector<BasicBlock *, 8> NewBlocks;
    ValueToValueMapTy VMap;

    // Subtract the exit weight from the current header weight -- the exit
    // weight is exactly the weight of the previous iteration's header.
    // FIXME: due to the way the distribution is constructed, we need a
    // guard here to make sure we don't end up with non-positive weights.
    if (ExitWeight < CurHeaderWeight)
      CurHeaderWeight -= ExitWeight;
    else
      CurHeaderWeight = 1;

    cloneLoopBlocks(L, Iter, InsertTop, InsertBot, Exit,
                    NewBlocks, LoopBlocks, VMap, LVMap, DT, LI);

    // Remap to use values from the current iteration instead of the
    // previous one.
    remapInstructionsInBlocks(NewBlocks, VMap);

    if (DT) {
      // Latches of the cloned loops dominate the loop exit, so the immediate
      // dominator of the exit is the latch of the first cloned iteration,
      // just as the original PreHeader dominates the original loop body.
      if (Iter == 0)
        DT->changeImmediateDominator(Exit, cast<BasicBlock>(LVMap[Latch]));
#ifndef NDEBUG
      if (VerifyDomInfo)
        DT->verifyDomTree();
#endif
    }

    updateBranchWeights(InsertBot, cast<BranchInst>(VMap[LatchBR]), Iter,
                        PeelCount, ExitWeight);

    InsertTop = InsertBot;
    InsertBot = SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI);
    InsertBot->setName(Header->getName() + ".peel.next");

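    // CloneBasicBlock appended the new blocks at the end of the function;
    // move them into layout position just before the new InsertTop so the
    // peeled iterations stay in program order.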
    F->getBasicBlockList().splice(InsertTop->getIterator(),
                                  F->getBasicBlockList(),
                                  NewBlocks[0]->getIterator(), F->end());
  }

  // Now adjust the phi nodes in the loop header to get their initial values
  // from the last peeled-off iteration instead of the preheader.
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    PHINode *PHI = cast<PHINode>(I);
    Value *NewVal = PHI->getIncomingValueForBlock(Latch);
    Instruction *LatchInst = dyn_cast<Instruction>(NewVal);
    if (LatchInst && L->contains(LatchInst))
      NewVal = LVMap[LatchInst];

    PHI->setIncomingValue(PHI->getBasicBlockIndex(NewPreHeader), NewVal);
  }

  // Adjust the branch weights on the loop exit.
  if (ExitWeight) {
    // The backedge count is the difference between the current header weight
    // and the current loop exit weight. If the current header weight is
    // smaller than the current loop exit weight, we mark the loop backedge
    // weight as 1.
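    // For example (illustrative numbers): if the original header weight was
    // 1000 with an exit weight of 100 and three iterations were peeled, the
    // remaining loop gets a backedge weight of 1000 - 3 * 100 - 100 = 600.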
    uint64_t BackEdgeWeight = 0;
    if (ExitWeight < CurHeaderWeight)
      BackEdgeWeight = CurHeaderWeight - ExitWeight;
    else
      BackEdgeWeight = 1;
    MDBuilder MDB(LatchBR->getContext());
    MDNode *WeightNode =
        HeaderIdx ? MDB.createBranchWeights(ExitWeight, BackEdgeWeight)
                  : MDB.createBranchWeights(BackEdgeWeight, ExitWeight);
    LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
  }

  // If the loop is nested, we changed the parent loop; update SE.
  if (Loop *ParentLoop = L->getParentLoop()) {
    SE->forgetLoop(ParentLoop);

    // FIXME: Incrementally update loop-simplify
    simplifyLoop(ParentLoop, DT, LI, SE, AC, PreserveLCSSA);
  } else {
    // FIXME: Incrementally update loop-simplify
    simplifyLoop(L, DT, LI, SE, AC, PreserveLCSSA);
  }

  NumPeeled++;

  return true;
}