//===- LoopDistribute.cpp - Loop Distribution Pass ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Loop Distribution Pass. Its main focus is to
// distribute loops that cannot be vectorized due to dependence cycles. It
// tries to isolate the offending dependences into a new loop allowing
// vectorization of the remaining parts.
//
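// For example (a sketch; the array names are illustrative), given the cycle
// through A:
//
//   for (i = 0; i < n; i++) {
//     A[i + 1] = A[i] * B[i]; // dependence cycle, not vectorizable
//     C[i] = D[i] * 2;        // vectorizable
//   }
//
// the pass distributes this into two loops:
//
//   for (i = 0; i < n; i++)   // new (sequential) loop
//     A[i + 1] = A[i] * B[i];
//   for (i = 0; i < n; i++)   // remaining loop, now vectorizable
//     C[i] = D[i] * 2;
//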
// For dependence analysis, the pass uses the LoopVectorizer's
// LoopAccessAnalysis. Because this analysis presumes no change in the order of
// memory operations, special care is taken to preserve the lexical order of
// these operations.
//
// Similarly to the Vectorizer, the pass also supports loop versioning to
// disambiguate potentially overlapping arrays at run time.
//
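// For example (a sketch; A, B and n are illustrative), with a potential
// overlap between A and B the distributed loops are guarded by a run-time
// check, keeping the original loop as the fall-back version:
//
//   if (A + n <= B || B + n <= A)
//     /* distributed loops */
//   else
//     /* original loop */
//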
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopDistribute.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include <list>

#define LDIST_NAME "loop-distribute"
#define DEBUG_TYPE LDIST_NAME

using namespace llvm;

static cl::opt<bool>
    LDistVerify("loop-distribute-verify", cl::Hidden,
                cl::desc("Turn on DominatorTree and LoopInfo verification "
                         "after Loop Distribution"),
                cl::init(false));

static cl::opt<bool> DistributeNonIfConvertible(
    "loop-distribute-non-if-convertible", cl::Hidden,
    cl::desc("Whether to distribute into a loop that may not be "
             "if-convertible by the loop vectorizer"),
    cl::init(false));

static cl::opt<unsigned> DistributeSCEVCheckThreshold(
    "loop-distribute-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Distribution"));

static cl::opt<unsigned> PragmaDistributeSCEVCheckThreshold(
    "loop-distribute-scev-check-threshold-with-pragma", cl::init(128),
    cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Distribution for loops marked with "
             "#pragma clang loop distribute(enable)"));

// Note that the initial value for this depends on whether the pass is invoked
// directly or from the optimization pipeline.
static cl::opt<bool> EnableLoopDistribute(
    "enable-loop-distribute", cl::Hidden,
    cl::desc("Enable the new, experimental LoopDistribution Pass"));

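// For instance (a sketch, assuming a build of opt with this pass available),
// distribution can be forced on all inner loops from the command line:
//
//   opt -loop-distribute -enable-loop-distribute -S < input.ll
//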
STATISTIC(NumLoopsDistributed, "Number of loops distributed");

namespace {
/// \brief Maintains the set of instructions of the loop for a partition before
/// cloning. After cloning, it hosts the new loop.
class InstPartition {
  typedef SmallPtrSet<Instruction *, 8> InstructionSet;

public:
  InstPartition(Instruction *I, Loop *L, bool DepCycle = false)
      : DepCycle(DepCycle), OrigLoop(L), ClonedLoop(nullptr) {
    Set.insert(I);
  }

  /// \brief Returns whether this partition contains a dependence cycle.
  bool hasDepCycle() const { return DepCycle; }

  /// \brief Adds an instruction to this partition.
  void add(Instruction *I) { Set.insert(I); }

  /// \brief Collection accessors.
  InstructionSet::iterator begin() { return Set.begin(); }
  InstructionSet::iterator end() { return Set.end(); }
  InstructionSet::const_iterator begin() const { return Set.begin(); }
  InstructionSet::const_iterator end() const { return Set.end(); }
  bool empty() const { return Set.empty(); }

  /// \brief Moves this partition into \p Other. This partition becomes empty
  /// after this.
  void moveTo(InstPartition &Other) {
    Other.Set.insert(Set.begin(), Set.end());
    Set.clear();
    Other.DepCycle |= DepCycle;
  }

  /// \brief Populates the partition with a transitive closure of all the
  /// instructions that the seeded instructions depend on.
  void populateUsedSet() {
    // FIXME: We currently don't use control-dependence but simply include all
    // blocks (possibly empty at the end) and let simplifycfg mostly clean this
    // up.
    for (auto *B : OrigLoop->getBlocks())
      Set.insert(B->getTerminator());

    // Follow the use-def chains to form a transitive closure of all the
    // instructions that the originally seeded instructions depend on.
    SmallVector<Instruction *, 8> Worklist(Set.begin(), Set.end());
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      // Insert instructions from the loop that we depend on.
      for (Value *V : I->operand_values()) {
        auto *I = dyn_cast<Instruction>(V);
        if (I && OrigLoop->contains(I->getParent()) && Set.insert(I).second)
          Worklist.push_back(I);
      }
    }
  }

  /// \brief Clones the original loop.
  ///
  /// Updates LoopInfo and DominatorTree using the information that block \p
  /// LoopDomBB dominates the loop.
  Loop *cloneLoopWithPreheader(BasicBlock *InsertBefore, BasicBlock *LoopDomBB,
                               unsigned Index, LoopInfo *LI,
                               DominatorTree *DT) {
    ClonedLoop = ::cloneLoopWithPreheader(InsertBefore, LoopDomBB, OrigLoop,
                                          VMap, Twine(".ldist") + Twine(Index),
                                          LI, DT, ClonedLoopBlocks);
    return ClonedLoop;
  }

  /// \brief The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  const Loop *getClonedLoop() const { return ClonedLoop; }

  /// \brief Returns the loop where this partition ends up after distribution.
  /// If this partition is mapped to the original loop, that loop is returned.
  const Loop *getDistributedLoop() const {
    return ClonedLoop ? ClonedLoop : OrigLoop;
  }

  /// \brief The VMap that is populated by cloning and then used in
  /// remapInstructions to remap the cloned instructions.
  ValueToValueMapTy &getVMap() { return VMap; }

  /// \brief Remaps the cloned instructions using VMap.
  void remapInstructions() {
    remapInstructionsInBlocks(ClonedLoopBlocks, VMap);
  }

  /// \brief Based on the set of instructions selected for this partition,
  /// removes the unnecessary ones.
  void removeUnusedInsts() {
    SmallVector<Instruction *, 8> Unused;

    for (auto *Block : OrigLoop->getBlocks())
      for (auto &Inst : *Block)
        if (!Set.count(&Inst)) {
          Instruction *NewInst = &Inst;
          if (!VMap.empty())
            NewInst = cast<Instruction>(VMap[NewInst]);

          assert(!isa<BranchInst>(NewInst) &&
                 "Branches are marked used early on");
          Unused.push_back(NewInst);
        }

    // Delete the instructions backwards so that fewer def-use and use-def
    // chains need to be updated.
    for (auto *Inst : reverse(Unused)) {
      if (!Inst->use_empty())
        Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
      Inst->eraseFromParent();
    }
  }

  void print() const {
    if (DepCycle)
      dbgs() << "  (cycle)\n";
    for (auto *I : Set)
      // Prefix with the block name.
      dbgs() << "  " << I->getParent()->getName() << ":" << *I << "\n";
  }

  void printBlocks() const {
    for (auto *BB : getDistributedLoop()->getBlocks())
      dbgs() << *BB;
  }

private:
  /// \brief Instructions from OrigLoop selected for this partition.
  InstructionSet Set;

  /// \brief Whether this partition contains a dependence cycle.
  bool DepCycle;

  /// \brief The original loop.
  Loop *OrigLoop;

  /// \brief The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  Loop *ClonedLoop;

  /// \brief The blocks of ClonedLoop including the preheader. If this
  /// partition is mapped to the original loop, this is empty.
  SmallVector<BasicBlock *, 8> ClonedLoopBlocks;

  /// \brief This gets populated once the set of instructions has been
  /// finalized. If this partition is mapped to the original loop, it is not
  /// set.
  ValueToValueMapTy VMap;
};

/// \brief Holds the set of Partitions. It populates them, merges them and then
/// clones the loops.
class InstPartitionContainer {
  typedef DenseMap<Instruction *, int> InstToPartitionIdT;

public:
  InstPartitionContainer(Loop *L, LoopInfo *LI, DominatorTree *DT)
      : L(L), LI(LI), DT(DT) {}

  /// \brief Returns the number of partitions.
  unsigned getSize() const { return PartitionContainer.size(); }

  /// \brief Adds \p Inst into the current partition if it is marked to
  /// contain cycles. Otherwise starts a new partition for it.
  void addToCyclicPartition(Instruction *Inst) {
    // If the current partition is non-cyclic, start a new one.
    if (PartitionContainer.empty() || !PartitionContainer.back().hasDepCycle())
      PartitionContainer.emplace_back(Inst, L, /*DepCycle=*/true);
    else
      PartitionContainer.back().add(Inst);
  }

  /// \brief Adds \p Inst into a partition that is not marked to contain
  /// dependence cycles.
  ///
  /// Initially we isolate memory instructions into as many partitions as
  /// possible, then later we may merge them back together.
  void addToNewNonCyclicPartition(Instruction *Inst) {
    PartitionContainer.emplace_back(Inst, L);
  }

  /// \brief Merges adjacent non-cyclic partitions.
  ///
  /// The idea is that we currently only want to isolate the non-vectorizable
  /// partition. We could later allow more distribution among these partitions
  /// too.
  void mergeAdjacentNonCyclic() {
    mergeAdjacentPartitionsIf(
        [](const InstPartition *P) { return !P->hasDepCycle(); });
  }

  /// \brief If a partition contains only conditional stores, we won't vectorize
  /// it. Try to merge it with a previous cyclic partition.
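  /// (A sketch: in "for (i) if (C[i]) A[i] = X;" the store to A is
  /// conditional, so a partition holding only that store is merged back into
  /// the previous cyclic partition instead of being distributed out.)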
  void mergeNonIfConvertible() {
    mergeAdjacentPartitionsIf([&](const InstPartition *Partition) {
      if (Partition->hasDepCycle())
        return true;

      // Now, check if all stores are conditional in this partition.
      bool seenStore = false;

      for (auto *Inst : *Partition)
        if (isa<StoreInst>(Inst)) {
          seenStore = true;
          if (!LoopAccessInfo::blockNeedsPredication(Inst->getParent(), L, DT))
            return false;
        }
      return seenStore;
    });
  }

  /// \brief Merges the partitions according to various heuristics.
  void mergeBeforePopulating() {
    mergeAdjacentNonCyclic();
    if (!DistributeNonIfConvertible)
      mergeNonIfConvertible();
  }

  /// \brief Merges partitions in order to ensure that no loads are duplicated.
  ///
  /// We can't duplicate loads because that could potentially reorder them.
  /// LoopAccessAnalysis provides dependency information with the context that
  /// the order of memory operations is preserved.
  ///
  /// Returns true if any partitions were merged.
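  ///
  /// (A sketch: with partitions P0, P1, P2 in program order and the same load
  /// seeded into both P0 and P2, all of P0, P1 and P2 end up in one merged
  /// partition; the intervening P1 must join too so that memory order is
  /// preserved.)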
  bool mergeToAvoidDuplicatedLoads() {
    typedef DenseMap<Instruction *, InstPartition *> LoadToPartitionT;
    typedef EquivalenceClasses<InstPartition *> ToBeMergedT;

    LoadToPartitionT LoadToPartition;
    ToBeMergedT ToBeMerged;

    // Step through the partitions and create equivalence between partitions
    // that contain the same load. Also put partitions in between them in the
    // same equivalence class to avoid reordering of memory operations.
    for (PartitionContainerT::iterator I = PartitionContainer.begin(),
                                       E = PartitionContainer.end();
         I != E; ++I) {
      auto *PartI = &*I;

      // If a load occurs in two partitions PartI and PartJ, merge all
      // partitions (PartI, PartJ] into PartI.
      for (Instruction *Inst : *PartI)
        if (isa<LoadInst>(Inst)) {
          bool NewElt;
          LoadToPartitionT::iterator LoadToPart;

          std::tie(LoadToPart, NewElt) =
              LoadToPartition.insert(std::make_pair(Inst, PartI));
          if (!NewElt) {
            DEBUG(dbgs() << "Merging partitions due to this load in multiple "
                         << "partitions: " << PartI << ", "
                         << LoadToPart->second << "\n" << *Inst << "\n");

            auto PartJ = I;
            do {
              --PartJ;
              ToBeMerged.unionSets(PartI, &*PartJ);
            } while (&*PartJ != LoadToPart->second);
          }
        }
    }
    if (ToBeMerged.empty())
      return false;

    // Merge the members of an equivalence class into its class leader. This
    // makes the members empty.
    for (ToBeMergedT::iterator I = ToBeMerged.begin(), E = ToBeMerged.end();
         I != E; ++I) {
      if (!I->isLeader())
        continue;

      auto PartI = I->getData();
      for (auto PartJ : make_range(std::next(ToBeMerged.member_begin(I)),
                                   ToBeMerged.member_end())) {
        PartJ->moveTo(*PartI);
      }
    }

    // Remove the empty partitions.
    PartitionContainer.remove_if(
        [](const InstPartition &P) { return P.empty(); });

    return true;
  }

  /// \brief Sets up the mapping from instructions to partitions. If the
  /// instruction is duplicated across multiple partitions, set the entry to -1.
  void setupPartitionIdOnInstructions() {
    int PartitionID = 0;
    for (const auto &Partition : PartitionContainer) {
      for (Instruction *Inst : Partition) {
        bool NewElt;
        InstToPartitionIdT::iterator Iter;

        std::tie(Iter, NewElt) =
            InstToPartitionId.insert(std::make_pair(Inst, PartitionID));
        if (!NewElt)
          Iter->second = -1;
      }
      ++PartitionID;
    }
  }

  /// \brief Populates the partitions with everything that the seeding
  /// instructions require.
  void populateUsedSet() {
    for (auto &P : PartitionContainer)
      P.populateUsedSet();
  }

  /// \brief This performs the main chunk of the work of cloning the loops for
  /// the partitions.
  void cloneLoops() {
    BasicBlock *OrigPH = L->getLoopPreheader();
    // At this point the predecessor of the preheader is either the memcheck
    // block or the top part of the original preheader.
    BasicBlock *Pred = OrigPH->getSinglePredecessor();
    assert(Pred && "Preheader does not have a single predecessor");
    BasicBlock *ExitBlock = L->getExitBlock();
    assert(ExitBlock && "No single exit block");
    Loop *NewLoop;

    assert(!PartitionContainer.empty() && "at least two partitions expected");
    // We're cloning the preheader along with the loop so we already made sure
    // it was empty.
    assert(&*OrigPH->begin() == OrigPH->getTerminator() &&
           "preheader not empty");

    // Create a loop for each partition except the last. Clone the original
    // loop before PH along with adding a preheader for the cloned loop. Then
    // update PH to point to the newly added preheader.
    BasicBlock *TopPH = OrigPH;
    unsigned Index = getSize() - 1;
    for (auto I = std::next(PartitionContainer.rbegin()),
              E = PartitionContainer.rend();
         I != E; ++I, --Index, TopPH = NewLoop->getLoopPreheader()) {
      auto *Part = &*I;

      NewLoop = Part->cloneLoopWithPreheader(TopPH, Pred, Index, LI, DT);

      Part->getVMap()[ExitBlock] = TopPH;
      Part->remapInstructions();
    }
    Pred->getTerminator()->replaceUsesOfWith(OrigPH, TopPH);

    // Now go in forward order and update the immediate dominator for the
    // preheaders with the exiting block of the previous loop. Dominance
    // within the loop is updated in cloneLoopWithPreheader.
    for (auto Curr = PartitionContainer.cbegin(),
              Next = std::next(PartitionContainer.cbegin()),
              E = PartitionContainer.cend();
         Next != E; ++Curr, ++Next)
      DT->changeImmediateDominator(
          Next->getDistributedLoop()->getLoopPreheader(),
          Curr->getDistributedLoop()->getExitingBlock());
  }

  /// \brief Removes the dead instructions from the cloned loops.
  void removeUnusedInsts() {
    for (auto &Partition : PartitionContainer)
      Partition.removeUnusedInsts();
  }

  /// \brief For each memory pointer, computes the partition it is used in.
  ///
  /// This returns an array of int where the I-th entry corresponds to the I-th
  /// entry in LAI.getRuntimePointerChecking(). If the pointer is used in
  /// multiple partitions its entry is set to -1.
  SmallVector<int, 8>
  computePartitionSetForPointers(const LoopAccessInfo &LAI) {
    const RuntimePointerChecking *RtPtrCheck = LAI.getRuntimePointerChecking();

    unsigned N = RtPtrCheck->Pointers.size();
    SmallVector<int, 8> PtrToPartitions(N);
    for (unsigned I = 0; I < N; ++I) {
      Value *Ptr = RtPtrCheck->Pointers[I].PointerValue;
      auto Instructions =
          LAI.getInstructionsForAccess(Ptr, RtPtrCheck->Pointers[I].IsWritePtr);

      int &Partition = PtrToPartitions[I];
      // First set it to uninitialized.
      Partition = -2;
      for (Instruction *Inst : Instructions) {
        // Note that this could be -1 if Inst is duplicated across multiple
        // partitions.
        int ThisPartition = this->InstToPartitionId[Inst];
        if (Partition == -2)
          Partition = ThisPartition;
        // -1 means belonging to multiple partitions.
        else if (Partition == -1)
          break;
        else if (Partition != (int)ThisPartition)
          Partition = -1;
      }
      assert(Partition != -2 && "Pointer not belonging to any partition");
    }

    return PtrToPartitions;
  }

  void print(raw_ostream &OS) const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      OS << "Partition " << Index++ << " (" << &P << "):\n";
      P.print();
    }
  }

  void dump() const { print(dbgs()); }

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const InstPartitionContainer &Partitions) {
    Partitions.print(OS);
    return OS;
  }
#endif

  void printBlocks() const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      dbgs() << "\nPartition " << Index++ << " (" << &P << "):\n";
      P.printBlocks();
    }
  }

private:
  typedef std::list<InstPartition> PartitionContainerT;

  /// \brief List of partitions.
  PartitionContainerT PartitionContainer;

  /// \brief Mapping from Instruction to partition Id. If the instruction
  /// belongs to multiple partitions the entry contains -1.
  InstToPartitionIdT InstToPartitionId;

  Loop *L;
  LoopInfo *LI;
  DominatorTree *DT;

  /// \brief Merges adjacent partitions if they both satisfy \p Predicate.
  template <class UnaryPredicate>
  void mergeAdjacentPartitionsIf(UnaryPredicate Predicate) {
    InstPartition *PrevMatch = nullptr;
    for (auto I = PartitionContainer.begin(); I != PartitionContainer.end();) {
      auto DoesMatch = Predicate(&*I);
      if (PrevMatch == nullptr && DoesMatch) {
        PrevMatch = &*I;
        ++I;
      } else if (PrevMatch != nullptr && DoesMatch) {
        I->moveTo(*PrevMatch);
        I = PartitionContainer.erase(I);
      } else {
        PrevMatch = nullptr;
        ++I;
      }
    }
  }
};

/// \brief For each memory instruction, this class maintains the difference
/// between the number of unsafe dependences that start at this instruction
/// and the number that end here.
///
/// By traversing the memory instructions in program order and accumulating this
/// number, we know whether any unsafe dependence crosses over a program point.
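///
/// (A sketch: given one unsafe dependence between Load1 and a later Store3,
/// the counts are Load1: +1, Load2: 0, Store3: -1, and the running sum is
/// positive exactly between Load1 and Store3, where the dependence is
/// active.)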
class MemoryInstructionDependences {
  typedef MemoryDepChecker::Dependence Dependence;

public:
  struct Entry {
    Instruction *Inst;
    unsigned NumUnsafeDependencesStartOrEnd;

    Entry(Instruction *Inst) : Inst(Inst), NumUnsafeDependencesStartOrEnd(0) {}
  };

  typedef SmallVector<Entry, 8> AccessesType;

  AccessesType::const_iterator begin() const { return Accesses.begin(); }
  AccessesType::const_iterator end() const { return Accesses.end(); }

  MemoryInstructionDependences(
      const SmallVectorImpl<Instruction *> &Instructions,
      const SmallVectorImpl<Dependence> &Dependences) {
    Accesses.append(Instructions.begin(), Instructions.end());

    DEBUG(dbgs() << "Backward dependences:\n");
    for (auto &Dep : Dependences)
      if (Dep.isPossiblyBackward()) {
        // Note that the designations source and destination follow the program
        // order, i.e. source is always first. (The direction is given by the
        // DepType.)
        ++Accesses[Dep.Source].NumUnsafeDependencesStartOrEnd;
        --Accesses[Dep.Destination].NumUnsafeDependencesStartOrEnd;

        DEBUG(Dep.print(dbgs(), 2, Instructions));
      }
  }

private:
  AccessesType Accesses;
};

/// \brief The actual class performing the per-loop work.
class LoopDistributeForLoop {
public:
  LoopDistributeForLoop(Loop *L, Function *F, LoopInfo *LI, DominatorTree *DT,
                        ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
      : L(L), F(F), LI(LI), LAI(nullptr), DT(DT), SE(SE), ORE(ORE) {
    setForced();
  }

  /// \brief Try to distribute an inner-most loop.
  bool processLoop(std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
    assert(L->empty() && "Only process inner loops.");

    DEBUG(dbgs() << "\nLDist: In \"" << L->getHeader()->getParent()->getName()
                 << "\" checking " << *L << "\n");

    BasicBlock *PH = L->getLoopPreheader();
    if (!PH)
      return fail("no preheader");
    if (!L->getExitBlock())
      return fail("multiple exit blocks");

    // LAA will check that we only have a single exiting block.
    LAI = &GetLAA(*L);

    // Currently, we only distribute to isolate the part of the loop with
    // dependence cycles to enable partial vectorization.
    if (LAI->canVectorizeMemory())
      return fail("memory operations are safe for vectorization");

    auto *Dependences = LAI->getDepChecker().getDependences();
    if (!Dependences || Dependences->empty())
      return fail("no unsafe dependences to isolate");

    InstPartitionContainer Partitions(L, LI, DT);

    // First, go through each memory operation and assign them to consecutive
    // partitions (the order of partitions follows program order). Put those
    // with unsafe dependences into a "cyclic" partition; otherwise put each
    // store in its own "non-cyclic" partition (we'll merge these later).
    //
    // Note that a memory operation (e.g. Load2 below) at a program point that
    // has an unsafe dependence (Store3->Load1) spanning over it must be
    // included in the same cyclic partition as the dependent operations. This
    // is to preserve the original program order after distribution. E.g.:
    //
    //            NumUnsafeDependencesStartOrEnd  NumUnsafeDependencesActive
    //  Load1 -.                1                         0->1
    //  Load2  | /Unsafe/       0                         1
    //  Store3 -'              -1                         1->0
    //  Load4                   0                         0
    //
    // NumUnsafeDependencesActive > 0 indicates this situation and in this case
    // we just keep assigning to the same cyclic partition until
    // NumUnsafeDependencesActive reaches 0.
    const MemoryDepChecker &DepChecker = LAI->getDepChecker();
    MemoryInstructionDependences MID(DepChecker.getMemoryInstructions(),
                                     *Dependences);

    int NumUnsafeDependencesActive = 0;
    for (auto &InstDep : MID) {
      Instruction *I = InstDep.Inst;
      // We update NumUnsafeDependencesActive post-instruction, catch the
      // start of a dependence directly via NumUnsafeDependencesStartOrEnd.
      if (NumUnsafeDependencesActive ||
          InstDep.NumUnsafeDependencesStartOrEnd > 0)
        Partitions.addToCyclicPartition(I);
      else
        Partitions.addToNewNonCyclicPartition(I);
      NumUnsafeDependencesActive += InstDep.NumUnsafeDependencesStartOrEnd;
      assert(NumUnsafeDependencesActive >= 0 &&
             "Negative number of dependences active");
    }

    // Add partitions for values used outside. These partitions can be out of
    // order from the original program order. This is OK because if the
    // partition uses a load we will merge this partition with the original
    // partition of the load that we set up in the previous loop (see
    // mergeToAvoidDuplicatedLoads).
    auto DefsUsedOutside = findDefsUsedOutsideOfLoop(L);
    for (auto *Inst : DefsUsedOutside)
      Partitions.addToNewNonCyclicPartition(Inst);

    DEBUG(dbgs() << "Seeded partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("cannot isolate unsafe dependencies");

    // Run the merge heuristics: Merge non-cyclic adjacent partitions since we
    // should be able to vectorize these together.
    Partitions.mergeBeforePopulating();
    DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("cannot isolate unsafe dependencies");

    // Now, populate the partitions with non-memory operations.
    Partitions.populateUsedSet();
    DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions);

    // In order to preserve original lexical order for loads, keep them in the
    // partition that we set up in the MemoryInstructionDependences loop.
    if (Partitions.mergeToAvoidDuplicatedLoads()) {
      DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
                   << Partitions);
      if (Partitions.getSize() < 2)
        return fail("cannot isolate unsafe dependencies");
    }

    // Don't distribute the loop if we need too many SCEV run-time checks.
    const SCEVUnionPredicate &Pred = LAI->getPSE().getUnionPredicate();
    if (Pred.getComplexity() > (IsForced.getValueOr(false)
                                    ? PragmaDistributeSCEVCheckThreshold
                                    : DistributeSCEVCheckThreshold))
      return fail("too many SCEV run-time checks needed");

    DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
    // We're done forming the partitions; set up the reverse mapping from
    // instructions to partitions.
    Partitions.setupPartitionIdOnInstructions();

    // To keep things simple have an empty preheader before we version or clone
    // the loop. (Also split if this has no predecessor, i.e. entry, because we
    // rely on PH having a predecessor.)
    if (!PH->getSinglePredecessor() || &*PH->begin() != PH->getTerminator())
      SplitBlock(PH, PH->getTerminator(), DT, LI);

    // If we need run-time checks, version the loop now.
    auto PtrToPartition = Partitions.computePartitionSetForPointers(*LAI);
    const auto *RtPtrChecking = LAI->getRuntimePointerChecking();
    const auto &AllChecks = RtPtrChecking->getChecks();
    auto Checks = includeOnlyCrossPartitionChecks(AllChecks, PtrToPartition,
                                                  RtPtrChecking);

    if (!Pred.isAlwaysTrue() || !Checks.empty()) {
      DEBUG(dbgs() << "\nPointers:\n");
      DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks));
      LoopVersioning LVer(*LAI, L, LI, DT, SE, false);
      LVer.setAliasChecks(std::move(Checks));
      LVer.setSCEVChecks(LAI->getPSE().getUnionPredicate());
      LVer.versionLoop(DefsUsedOutside);
      LVer.annotateLoopWithNoAlias();
    }

    // Create identical copies of the original loop for each partition and hook
    // them up sequentially.
    Partitions.cloneLoops();

    // Now, remove from each loop the instructions that don't belong to its
    // partition.
    Partitions.removeUnusedInsts();
    DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
    DEBUG(Partitions.printBlocks());

    if (LDistVerify) {
      LI->verify(*DT);
      DT->verifyDomTree();
    }

    ++NumLoopsDistributed;
    // Report the success.
    ORE->emitOptimizationRemark(LDIST_NAME, L, "distributed loop");
    return true;
  }

  /// \brief Provide diagnostics, then \return false.
  bool fail(llvm::StringRef Message) {
    LLVMContext &Ctx = F->getContext();
    bool Forced = isForced().getValueOr(false);

    DEBUG(dbgs() << "Skipping; " << Message << "\n");

    // With Rpass-missed report that distribution failed.
    ORE->emitOptimizationRemarkMissed(
        LDIST_NAME, L,
        "loop not distributed: use -Rpass-analysis=loop-distribute for more "
        "info");

    // With Rpass-analysis report why. This is on by default if distribution
    // was requested explicitly.
    ORE->emitOptimizationRemarkAnalysis(
        Forced ? DiagnosticInfoOptimizationRemarkAnalysis::AlwaysPrint
               : LDIST_NAME,
        L, Twine("loop not distributed: ") + Message);

    // Also issue a warning if distribution was requested explicitly but it
    // failed.
    if (Forced)
      Ctx.diagnose(DiagnosticInfoOptimizationFailure(
          *F, L->getStartLoc(), "loop not distributed: failed "
                                "explicitly specified loop distribution"));

    return false;
  }

  /// \brief Returns whether distribution was forced to be enabled/disabled for
  /// the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  const Optional<bool> &isForced() const { return IsForced; }

private:
  /// \brief Filter out checks between pointers from the same partition.
  ///
  /// \p PtrToPartition contains the partition number for pointers. Partition
  /// number -1 means that the pointer is used in multiple partitions. In this
  /// case we can't safely omit the check.
  SmallVector<RuntimePointerChecking::PointerCheck, 4>
  includeOnlyCrossPartitionChecks(
      const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &AllChecks,
      const SmallVectorImpl<int> &PtrToPartition,
      const RuntimePointerChecking *RtPtrChecking) {
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks;

    std::copy_if(AllChecks.begin(), AllChecks.end(), std::back_inserter(Checks),
                 [&](const RuntimePointerChecking::PointerCheck &Check) {
                   for (unsigned PtrIdx1 : Check.first->Members)
                     for (unsigned PtrIdx2 : Check.second->Members)
                       // Only include this check if there is a pair of pointers
                       // that require checking and the pointers fall into
                       // separate partitions.
                       //
                       // (Note that we already know at this point that the two
                       // pointer groups need checking but it doesn't follow
                       // that each pair of pointers within the two groups needs
                       // checking as well.
                       //
                       // In other words we don't want to include a check just
                       // because there is a pair of pointers between the two
                       // pointer groups that require checks and a different
                       // pair whose pointers fall into different partitions.)
                       if (RtPtrChecking->needsChecking(PtrIdx1, PtrIdx2) &&
                           !RuntimePointerChecking::arePointersInSamePartition(
                               PtrToPartition, PtrIdx1, PtrIdx2))
                         return true;
                   return false;
                 });

    return Checks;
  }

  /// \brief Check whether the loop metadata is forcing distribution to be
  /// enabled/disabled.
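  ///
  /// A sketch of the IR shape this inspects (only the metadata key is taken
  /// from the code below; the node layout follows the usual llvm.loop form):
  ///   br i1 %cond, label %header, label %exit, !llvm.loop !0
  ///   !0 = distinct !{!0, !1}
  ///   !1 = !{!"llvm.loop.distribute.enable", i1 true}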
  void setForced() {
    Optional<const MDOperand *> Value =
        findStringMetadataForLoop(L, "llvm.loop.distribute.enable");
    if (!Value)
      return;

    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    IsForced = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }

  Loop *L;
  Function *F;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo *LAI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  OptimizationRemarkEmitter *ORE;

  /// \brief Indicates whether distribution is forced to be enabled/disabled for
  /// the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  Optional<bool> IsForced;
};

/// Shared implementation between new and old PMs.
static bool runImpl(Function &F, LoopInfo *LI, DominatorTree *DT,
                    ScalarEvolution *SE, OptimizationRemarkEmitter *ORE,
                    std::function<const LoopAccessInfo &(Loop &)> &GetLAA,
                    bool ProcessAllLoops) {
  // Build up a worklist of inner loops to distribute. This is necessary as the
  // act of distributing a loop creates new loops and can invalidate iterators
  // across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop))
      // We only handle inner-most loops.
      if (L->empty())
        Worklist.push_back(L);

  // Now walk the identified inner loops.
  bool Changed = false;
  for (Loop *L : Worklist) {
    LoopDistributeForLoop LDL(L, &F, LI, DT, SE, ORE);

    // If distribution was forced for the specific loop to be
    // enabled/disabled, follow that. Otherwise use the global flag.
    if (LDL.isForced().getValueOr(ProcessAllLoops))
      Changed |= LDL.processLoop(GetLAA);
  }

  return Changed;
}

/// \brief The pass class.
class LoopDistributeLegacy : public FunctionPass {
public:
  /// \p ProcessAllLoopsByDefault specifies whether loop distribution should be
  /// performed by default. Pass -enable-loop-distribute={0,1} overrides this
  /// default. We use this to keep LoopDistribution off by default when invoked
  /// from the optimization pipeline but on when invoked explicitly from opt.
  LoopDistributeLegacy(bool ProcessAllLoopsByDefault = true)
      : FunctionPass(ID), ProcessAllLoops(ProcessAllLoopsByDefault) {
    // The default is set by the caller.
    if (EnableLoopDistribute.getNumOccurrences() > 0)
      ProcessAllLoops = EnableLoopDistribute;
    initializeLoopDistributeLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return runImpl(F, LI, DT, SE, ORE, GetLAA, ProcessAllLoops);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  }

  static char ID;

private:
  /// \brief Whether distribution should be on in this function. The per-loop
  /// pragma can override this.
  bool ProcessAllLoops;
};
} // anonymous namespace

PreservedAnalyses LoopDistributePass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  // FIXME: This does not currently match the behavior from the old PM.
  // ProcessAllLoops with the old PM defaults to true when invoked from opt and
  // false when invoked from the optimization pipeline.
  bool ProcessAllLoops = false;
  if (EnableLoopDistribute.getNumOccurrences() > 0)
    ProcessAllLoops = EnableLoopDistribute;

  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    return LAM.getResult<LoopAccessAnalysis>(L);
  };

  bool Changed = runImpl(F, &LI, &DT, &SE, &ORE, GetLAA, ProcessAllLoops);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

char LoopDistributeLegacy::ID;
static const char ldist_name[] = "Loop Distribution";

INITIALIZE_PASS_BEGIN(LoopDistributeLegacy, LDIST_NAME, ldist_name, false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopDistributeLegacy, LDIST_NAME, ldist_name, false, false)

namespace llvm {
FunctionPass *createLoopDistributePass(bool ProcessAllLoopsByDefault) {
  return new LoopDistributeLegacy(ProcessAllLoopsByDefault);
}
}