//===- LoopDistribute.cpp - Loop Distribution Pass ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Loop Distribution Pass. Its main focus is to
// distribute loops that cannot be vectorized due to dependence cycles. It
// tries to isolate the offending dependences into a new loop, allowing
// vectorization of the remaining parts.
//
// For dependence analysis, the pass uses the LoopVectorizer's
// LoopAccessAnalysis. Because this analysis presumes no change in the order of
// memory operations, special care is taken to preserve the lexical order of
// these operations.
//
// As with the Vectorizer, the pass also supports loop versioning to
// disambiguate potentially overlapping arrays at run time.
//
//===----------------------------------------------------------------------===//
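
// For example (an illustrative sketch, not taken from this file), a loop such
// as
//
//   for (i = 0; i < n; i++) {
//     a[i + 1] = a[i] + b[i];  // dependence cycle: not vectorizable
//     c[i] = d[i] * 2;         // independent: vectorizable
//   }
//
// may be distributed into
//
//   for (i = 0; i < n; i++)
//     a[i + 1] = a[i] + b[i];
//   for (i = 0; i < n; i++)
//     c[i] = d[i] * 2;
//
// allowing the second loop to be vectorized.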

#include "llvm/Transforms/Scalar/LoopDistribute.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include <list>

#define LDIST_NAME "loop-distribute"
#define DEBUG_TYPE LDIST_NAME

using namespace llvm;

static cl::opt<bool>
    LDistVerify("loop-distribute-verify", cl::Hidden,
                cl::desc("Turn on DominatorTree and LoopInfo verification "
                         "after Loop Distribution"),
                cl::init(false));

static cl::opt<bool> DistributeNonIfConvertible(
    "loop-distribute-non-if-convertible", cl::Hidden,
    cl::desc("Whether to distribute into a loop that may not be "
             "if-convertible by the loop vectorizer"),
    cl::init(false));

static cl::opt<unsigned> DistributeSCEVCheckThreshold(
    "loop-distribute-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Distribution"));

static cl::opt<unsigned> PragmaDistributeSCEVCheckThreshold(
    "loop-distribute-scev-check-threshold-with-pragma", cl::init(128),
    cl::Hidden,
    cl::desc(
        "The maximum number of SCEV checks allowed for Loop "
        "Distribution for loop marked with #pragma loop distribute(enable)"));

// Note that the initial value for this depends on whether the pass is invoked
// directly or from the optimization pipeline.
static cl::opt<bool> EnableLoopDistribute(
    "enable-loop-distribute", cl::Hidden,
    cl::desc("Enable the new, experimental LoopDistribution Pass"));

STATISTIC(NumLoopsDistributed, "Number of loops distributed");

namespace {
/// \brief Maintains the set of instructions of the loop for a partition before
/// cloning. After cloning, it hosts the new loop.
class InstPartition {
  typedef SmallPtrSet<Instruction *, 8> InstructionSet;

public:
  InstPartition(Instruction *I, Loop *L, bool DepCycle = false)
      : DepCycle(DepCycle), OrigLoop(L), ClonedLoop(nullptr) {
    Set.insert(I);
  }

  /// \brief Returns whether this partition contains a dependence cycle.
  bool hasDepCycle() const { return DepCycle; }

  /// \brief Adds an instruction to this partition.
  void add(Instruction *I) { Set.insert(I); }

  /// \brief Collection accessors.
  InstructionSet::iterator begin() { return Set.begin(); }
  InstructionSet::iterator end() { return Set.end(); }
  InstructionSet::const_iterator begin() const { return Set.begin(); }
  InstructionSet::const_iterator end() const { return Set.end(); }
  bool empty() const { return Set.empty(); }

  /// \brief Moves this partition into \p Other. This partition becomes empty
  /// after this.
  void moveTo(InstPartition &Other) {
    Other.Set.insert(Set.begin(), Set.end());
    Set.clear();
    Other.DepCycle |= DepCycle;
  }

  /// \brief Populates the partition with a transitive closure of all the
  /// instructions that the seeded instructions depend on.
  void populateUsedSet() {
    // FIXME: We currently don't use control-dependence but simply include all
    // blocks (possibly empty at the end) and let simplifycfg mostly clean this
    // up.
    for (auto *B : OrigLoop->getBlocks())
      Set.insert(B->getTerminator());

    // Follow the use-def chains to form a transitive closure of all the
    // instructions that the originally seeded instructions depend on.
    SmallVector<Instruction *, 8> Worklist(Set.begin(), Set.end());
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      // Insert instructions from the loop that we depend on.
      for (Value *V : I->operand_values()) {
        auto *I = dyn_cast<Instruction>(V);
        if (I && OrigLoop->contains(I->getParent()) && Set.insert(I).second)
          Worklist.push_back(I);
      }
    }
  }

  /// \brief Clones the original loop.
  ///
  /// Updates LoopInfo and DominatorTree using the information that block \p
  /// LoopDomBB dominates the loop.
  Loop *cloneLoopWithPreheader(BasicBlock *InsertBefore, BasicBlock *LoopDomBB,
                               unsigned Index, LoopInfo *LI,
                               DominatorTree *DT) {
    ClonedLoop = ::cloneLoopWithPreheader(InsertBefore, LoopDomBB, OrigLoop,
                                          VMap, Twine(".ldist") + Twine(Index),
                                          LI, DT, ClonedLoopBlocks);
    return ClonedLoop;
  }

  /// \brief The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  const Loop *getClonedLoop() const { return ClonedLoop; }

  /// \brief Returns the loop where this partition ends up after distribution.
  /// If this partition is mapped to the original loop, that loop is returned.
  const Loop *getDistributedLoop() const {
    return ClonedLoop ? ClonedLoop : OrigLoop;
  }

  /// \brief The VMap that is populated by cloning and then used by
  /// remapInstructions to remap the cloned instructions.
  ValueToValueMapTy &getVMap() { return VMap; }

  /// \brief Remaps the cloned instructions using VMap.
  void remapInstructions() {
    remapInstructionsInBlocks(ClonedLoopBlocks, VMap);
  }

  /// \brief Based on the set of instructions selected for this partition,
  /// removes the unnecessary ones.
  void removeUnusedInsts() {
    SmallVector<Instruction *, 8> Unused;

    for (auto *Block : OrigLoop->getBlocks())
      for (auto &Inst : *Block)
        if (!Set.count(&Inst)) {
          Instruction *NewInst = &Inst;
          if (!VMap.empty())
            NewInst = cast<Instruction>(VMap[NewInst]);

          assert(!isa<BranchInst>(NewInst) &&
                 "Branches are marked used early on");
          Unused.push_back(NewInst);
        }

    // Delete the instructions backwards; this reduces the number of def-use
    // and use-def chains that have to be updated.
    for (auto *Inst : reverse(Unused)) {
      if (!Inst->use_empty())
        Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
      Inst->eraseFromParent();
    }
  }

  void print() const {
    if (DepCycle)
      dbgs() << "  (cycle)\n";
    for (auto *I : Set)
      // Prefix with the block name.
      dbgs() << "  " << I->getParent()->getName() << ":" << *I << "\n";
  }

  void printBlocks() const {
    for (auto *BB : getDistributedLoop()->getBlocks())
      dbgs() << *BB;
  }

private:
  /// \brief Instructions from OrigLoop selected for this partition.
  InstructionSet Set;

  /// \brief Whether this partition contains a dependence cycle.
  bool DepCycle;

  /// \brief The original loop.
  Loop *OrigLoop;

  /// \brief The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  Loop *ClonedLoop;

  /// \brief The blocks of ClonedLoop including the preheader. If this
  /// partition is mapped to the original loop, this is empty.
  SmallVector<BasicBlock *, 8> ClonedLoopBlocks;

  /// \brief This gets populated once the set of instructions has been
  /// finalized. If this partition is mapped to the original loop, it is not
  /// populated.
  ValueToValueMapTy VMap;
};

/// \brief Holds the set of Partitions. It populates them, merges them and then
/// clones the loops.
class InstPartitionContainer {
  typedef DenseMap<Instruction *, int> InstToPartitionIdT;

public:
  InstPartitionContainer(Loop *L, LoopInfo *LI, DominatorTree *DT)
      : L(L), LI(LI), DT(DT) {}

  /// \brief Returns the number of partitions.
  unsigned getSize() const { return PartitionContainer.size(); }

  /// \brief Adds \p Inst into the current partition if that is marked to
  /// contain cycles. Otherwise starts a new partition for it.
  void addToCyclicPartition(Instruction *Inst) {
    // If the current partition is non-cyclic, start a new one.
    if (PartitionContainer.empty() || !PartitionContainer.back().hasDepCycle())
      PartitionContainer.emplace_back(Inst, L, /*DepCycle=*/true);
    else
      PartitionContainer.back().add(Inst);
  }

  /// \brief Adds \p Inst into a partition that is not marked to contain
  /// dependence cycles.
  ///
  // Initially we isolate memory instructions into as many partitions as
  // possible, then later we may merge them back together.
  void addToNewNonCyclicPartition(Instruction *Inst) {
    PartitionContainer.emplace_back(Inst, L);
  }

  /// \brief Merges adjacent non-cyclic partitions.
  ///
  /// The idea is that we currently only want to isolate the non-vectorizable
  /// partition. We could later allow more distribution among these partitions
  /// too.
  void mergeAdjacentNonCyclic() {
    mergeAdjacentPartitionsIf(
        [](const InstPartition *P) { return !P->hasDepCycle(); });
  }

  /// \brief If a partition contains only conditional stores, we won't vectorize
  /// it. Try to merge it with a previous cyclic partition.
  void mergeNonIfConvertible() {
    mergeAdjacentPartitionsIf([&](const InstPartition *Partition) {
      if (Partition->hasDepCycle())
        return true;

      // Now, check if all stores are conditional in this partition.
      bool seenStore = false;

      for (auto *Inst : *Partition)
        if (isa<StoreInst>(Inst)) {
          seenStore = true;
          if (!LoopAccessInfo::blockNeedsPredication(Inst->getParent(), L, DT))
            return false;
        }
      return seenStore;
    });
  }

  /// \brief Merges the partitions according to various heuristics.
  void mergeBeforePopulating() {
    mergeAdjacentNonCyclic();
    if (!DistributeNonIfConvertible)
      mergeNonIfConvertible();
  }

  /// \brief Merges partitions in order to ensure that no loads are duplicated.
  ///
  /// We can't duplicate loads because that could potentially reorder them.
  /// LoopAccessAnalysis provides dependency information with the context that
  /// the order of memory operations is preserved. For example, if the same
  /// load ends up seeded in partitions 1 and 3, partitions 1 through 3 are
  /// merged so that the load stays unique and program order is kept.
  ///
  /// Returns true if any partitions were merged.
  bool mergeToAvoidDuplicatedLoads() {
    typedef DenseMap<Instruction *, InstPartition *> LoadToPartitionT;
    typedef EquivalenceClasses<InstPartition *> ToBeMergedT;

    LoadToPartitionT LoadToPartition;
    ToBeMergedT ToBeMerged;

    // Step through the partitions and create equivalence between partitions
    // that contain the same load. Also put partitions in between them in the
    // same equivalence class to avoid reordering of memory operations.
    for (PartitionContainerT::iterator I = PartitionContainer.begin(),
                                       E = PartitionContainer.end();
         I != E; ++I) {
      auto *PartI = &*I;

      // If a load occurs in two partitions PartI and PartJ, merge all
      // partitions (PartI, PartJ] into PartI.
      for (Instruction *Inst : *PartI)
        if (isa<LoadInst>(Inst)) {
          bool NewElt;
          LoadToPartitionT::iterator LoadToPart;

          std::tie(LoadToPart, NewElt) =
              LoadToPartition.insert(std::make_pair(Inst, PartI));
          if (!NewElt) {
            DEBUG(dbgs() << "Merging partitions due to this load in multiple "
                         << "partitions: " << PartI << ", "
                         << LoadToPart->second << "\n" << *Inst << "\n");

            auto PartJ = I;
            do {
              --PartJ;
              ToBeMerged.unionSets(PartI, &*PartJ);
            } while (&*PartJ != LoadToPart->second);
          }
        }
    }
    if (ToBeMerged.empty())
      return false;

    // Merge the members of an equivalence class into its class leader. This
    // makes the merged-from partitions empty.
    for (ToBeMergedT::iterator I = ToBeMerged.begin(), E = ToBeMerged.end();
         I != E; ++I) {
      if (!I->isLeader())
        continue;

      auto PartI = I->getData();
      for (auto PartJ : make_range(std::next(ToBeMerged.member_begin(I)),
                                   ToBeMerged.member_end())) {
        PartJ->moveTo(*PartI);
      }
    }

    // Remove the empty partitions.
    PartitionContainer.remove_if(
        [](const InstPartition &P) { return P.empty(); });

    return true;
  }

  /// \brief Sets up the mapping from instructions to partitions. If an
  /// instruction is duplicated across multiple partitions, set its entry to -1.
  void setupPartitionIdOnInstructions() {
    int PartitionID = 0;
    for (const auto &Partition : PartitionContainer) {
      for (Instruction *Inst : Partition) {
        bool NewElt;
        InstToPartitionIdT::iterator Iter;

        std::tie(Iter, NewElt) =
            InstToPartitionId.insert(std::make_pair(Inst, PartitionID));
        if (!NewElt)
          Iter->second = -1;
      }
      ++PartitionID;
    }
  }

  /// \brief Populates the partitions with everything that the seeding
  /// instructions require.
  void populateUsedSet() {
    for (auto &P : PartitionContainer)
      P.populateUsedSet();
  }

  /// \brief This performs the main chunk of the work of cloning the loops for
  /// the partitions.
  void cloneLoops() {
    BasicBlock *OrigPH = L->getLoopPreheader();
    // At this point the predecessor of the preheader is either the memcheck
    // block or the top part of the original preheader.
    BasicBlock *Pred = OrigPH->getSinglePredecessor();
    assert(Pred && "Preheader does not have a single predecessor");
    BasicBlock *ExitBlock = L->getExitBlock();
    assert(ExitBlock && "No single exit block");
    Loop *NewLoop;

    assert(!PartitionContainer.empty() && "at least two partitions expected");
    // We're cloning the preheader along with the loop so we already made sure
    // it was empty.
    assert(&*OrigPH->begin() == OrigPH->getTerminator() &&
           "preheader not empty");

    // Create a loop for each partition except the last. Clone the original
    // loop before PH along with adding a preheader for the cloned loop. Then
    // update PH to point to the newly added preheader.
    BasicBlock *TopPH = OrigPH;
    unsigned Index = getSize() - 1;
    for (auto I = std::next(PartitionContainer.rbegin()),
              E = PartitionContainer.rend();
         I != E; ++I, --Index, TopPH = NewLoop->getLoopPreheader()) {
      auto *Part = &*I;

      NewLoop = Part->cloneLoopWithPreheader(TopPH, Pred, Index, LI, DT);

      Part->getVMap()[ExitBlock] = TopPH;
      Part->remapInstructions();
    }
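
    // At this point, for a two-way distribution the blocks are laid out
    // roughly as (an illustrative sketch, using the ".ldist" suffix added
    // above):
    //
    //   Pred -> ph.ldist1 -> loop.ldist1 -> ph -> original loop -> exit
    //
    // with each cloned loop exiting into the preheader of the next loop.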
    Pred->getTerminator()->replaceUsesOfWith(OrigPH, TopPH);

    // Now go in forward order and update the immediate dominator for the
    // preheaders with the exiting block of the previous loop. Dominance
    // within the loop is updated in cloneLoopWithPreheader.
    for (auto Curr = PartitionContainer.cbegin(),
              Next = std::next(PartitionContainer.cbegin()),
              E = PartitionContainer.cend();
         Next != E; ++Curr, ++Next)
      DT->changeImmediateDominator(
          Next->getDistributedLoop()->getLoopPreheader(),
          Curr->getDistributedLoop()->getExitingBlock());
  }

  /// \brief Removes the dead instructions from the cloned loops.
  void removeUnusedInsts() {
    for (auto &Partition : PartitionContainer)
      Partition.removeUnusedInsts();
  }

  /// \brief For each memory pointer, computes the partition id that the
  /// pointer is used in.
  ///
  /// This returns an array of int where the I-th entry corresponds to the I-th
  /// entry in LAI.getRuntimePointerCheck(). If the pointer is used in multiple
  /// partitions its entry is set to -1.
  SmallVector<int, 8>
  computePartitionSetForPointers(const LoopAccessInfo &LAI) {
    const RuntimePointerChecking *RtPtrCheck = LAI.getRuntimePointerChecking();

    unsigned N = RtPtrCheck->Pointers.size();
    SmallVector<int, 8> PtrToPartitions(N);
    for (unsigned I = 0; I < N; ++I) {
      Value *Ptr = RtPtrCheck->Pointers[I].PointerValue;
      auto Instructions =
          LAI.getInstructionsForAccess(Ptr, RtPtrCheck->Pointers[I].IsWritePtr);

      int &Partition = PtrToPartitions[I];
      // First set it to uninitialized.
      Partition = -2;
      for (Instruction *Inst : Instructions) {
        // Note that this could be -1 if Inst is duplicated across multiple
        // partitions.
        int ThisPartition = this->InstToPartitionId[Inst];
        if (Partition == -2)
          Partition = ThisPartition;
        // -1 means belonging to multiple partitions.
        else if (Partition == -1)
          break;
        else if (Partition != (int)ThisPartition)
          Partition = -1;
      }
      assert(Partition != -2 && "Pointer not belonging to any partition");
    }

    return PtrToPartitions;
  }

  void print(raw_ostream &OS) const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      OS << "Partition " << Index++ << " (" << &P << "):\n";
      P.print();
    }
  }

  void dump() const { print(dbgs()); }

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const InstPartitionContainer &Partitions) {
    Partitions.print(OS);
    return OS;
  }
#endif

  void printBlocks() const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      dbgs() << "\nPartition " << Index++ << " (" << &P << "):\n";
      P.printBlocks();
    }
  }

private:
  typedef std::list<InstPartition> PartitionContainerT;

  /// \brief List of partitions.
  PartitionContainerT PartitionContainer;

  /// \brief Mapping from Instruction to partition Id. If the instruction
  /// belongs to multiple partitions the entry contains -1.
  InstToPartitionIdT InstToPartitionId;

  Loop *L;
  LoopInfo *LI;
  DominatorTree *DT;

  /// \brief The control structure to merge adjacent partitions if they both
  /// satisfy the \p Predicate.
  template <class UnaryPredicate>
  void mergeAdjacentPartitionsIf(UnaryPredicate Predicate) {
    InstPartition *PrevMatch = nullptr;
    for (auto I = PartitionContainer.begin(); I != PartitionContainer.end();) {
      auto DoesMatch = Predicate(&*I);
      if (PrevMatch == nullptr && DoesMatch) {
        PrevMatch = &*I;
        ++I;
      } else if (PrevMatch != nullptr && DoesMatch) {
        I->moveTo(*PrevMatch);
        I = PartitionContainer.erase(I);
      } else {
        PrevMatch = nullptr;
        ++I;
      }
    }
  }
};

/// \brief For each memory instruction, this class maintains the difference
/// between the number of unsafe dependences that start out from this
/// instruction and the number that end here.
///
/// By traversing the memory instructions in program order and accumulating this
/// number, we know whether any unsafe dependence crosses over a program point.
class MemoryInstructionDependences {
  typedef MemoryDepChecker::Dependence Dependence;

public:
  struct Entry {
    Instruction *Inst;
    unsigned NumUnsafeDependencesStartOrEnd;

    Entry(Instruction *Inst) : Inst(Inst), NumUnsafeDependencesStartOrEnd(0) {}
  };

  typedef SmallVector<Entry, 8> AccessesType;

  AccessesType::const_iterator begin() const { return Accesses.begin(); }
  AccessesType::const_iterator end() const { return Accesses.end(); }

  MemoryInstructionDependences(
      const SmallVectorImpl<Instruction *> &Instructions,
      const SmallVectorImpl<Dependence> &Dependences) {
    Accesses.append(Instructions.begin(), Instructions.end());

    DEBUG(dbgs() << "Backward dependences:\n");
    for (auto &Dep : Dependences)
      if (Dep.isPossiblyBackward()) {
        // Note that the designations source and destination follow the program
        // order, i.e. source is always first. (The direction is given by the
        // DepType.)
        ++Accesses[Dep.Source].NumUnsafeDependencesStartOrEnd;
        --Accesses[Dep.Destination].NumUnsafeDependencesStartOrEnd;

        DEBUG(Dep.print(dbgs(), 2, Instructions));
      }
  }

private:
  AccessesType Accesses;
};

/// \brief The actual class performing the per-loop work.
class LoopDistributeForLoop {
public:
  LoopDistributeForLoop(Loop *L, Function *F, LoopInfo *LI, DominatorTree *DT,
                        ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
      : L(L), F(F), LI(LI), LAI(nullptr), DT(DT), SE(SE), ORE(ORE) {
    setForced();
  }

  /// \brief Try to distribute an inner-most loop.
  bool processLoop(std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
    assert(L->empty() && "Only process inner loops.");

    DEBUG(dbgs() << "\nLDist: In \"" << L->getHeader()->getParent()->getName()
                 << "\" checking " << *L << "\n");

    BasicBlock *PH = L->getLoopPreheader();
    if (!PH)
      return fail("NoHeader", "no preheader");
    if (!L->getExitBlock())
      return fail("MultipleExitBlocks", "multiple exit blocks");

    // LAA will check that we only have a single exiting block.
    LAI = &GetLAA(*L);

    // Currently, we only distribute to isolate the part of the loop with
    // dependence cycles to enable partial vectorization.
    if (LAI->canVectorizeMemory())
      return fail("MemOpsCanBeVectorized",
                  "memory operations are safe for vectorization");

    auto *Dependences = LAI->getDepChecker().getDependences();
    if (!Dependences || Dependences->empty())
      return fail("NoUnsafeDeps", "no unsafe dependences to isolate");

    InstPartitionContainer Partitions(L, LI, DT);

    // First, go through each memory operation and assign them to consecutive
    // partitions (the order of partitions follows program order). Put those
    // with unsafe dependences into a "cyclic" partition, otherwise put each
    // store in its own "non-cyclic" partition (we'll merge these later).
    //
    // Note that a memory operation (e.g. Load2 below) at a program point that
    // has an unsafe dependence (Store3->Load1) spanning over it must be
    // included in the same cyclic partition as the dependent operations. This
    // is to preserve the original program order after distribution. E.g.:
    //
    //            NumUnsafeDependencesStartOrEnd  NumUnsafeDependencesActive
    //  Load1 -.                 1                         0->1
    //  Load2  | /Unsafe/        0                         1
    //  Store3 -'               -1                         1->0
    //  Load4                    0                         0
    //
    // NumUnsafeDependencesActive > 0 indicates this situation and in this case
    // we just keep assigning to the same cyclic partition until
    // NumUnsafeDependencesActive reaches 0.
    const MemoryDepChecker &DepChecker = LAI->getDepChecker();
    MemoryInstructionDependences MID(DepChecker.getMemoryInstructions(),
                                     *Dependences);

    int NumUnsafeDependencesActive = 0;
    for (auto &InstDep : MID) {
      Instruction *I = InstDep.Inst;
      // We update NumUnsafeDependencesActive post-instruction, catch the
      // start of a dependence directly via NumUnsafeDependencesStartOrEnd.
      if (NumUnsafeDependencesActive ||
          InstDep.NumUnsafeDependencesStartOrEnd > 0)
        Partitions.addToCyclicPartition(I);
      else
        Partitions.addToNewNonCyclicPartition(I);
      NumUnsafeDependencesActive += InstDep.NumUnsafeDependencesStartOrEnd;
      assert(NumUnsafeDependencesActive >= 0 &&
             "Negative number of dependences active");
    }

    // Add partitions for values used outside. These partitions can be out of
    // order from the original program order. This is OK because if the
    // partition uses a load we will merge this partition with the original
    // partition of the load that we set up in the previous loop (see
    // mergeToAvoidDuplicatedLoads).
    auto DefsUsedOutside = findDefsUsedOutsideOfLoop(L);
    for (auto *Inst : DefsUsedOutside)
      Partitions.addToNewNonCyclicPartition(Inst);

    DEBUG(dbgs() << "Seeded partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("CantIsolateUnsafeDeps",
                  "cannot isolate unsafe dependencies");

    // Run the merge heuristics: Merge non-cyclic adjacent partitions since we
    // should be able to vectorize these together.
    Partitions.mergeBeforePopulating();
    DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("CantIsolateUnsafeDeps",
                  "cannot isolate unsafe dependencies");

    // Now, populate the partitions with non-memory operations.
    Partitions.populateUsedSet();
    DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions);

    // In order to preserve original lexical order for loads, keep them in the
    // partition that we set up in the MemoryInstructionDependences loop.
    if (Partitions.mergeToAvoidDuplicatedLoads()) {
      DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
                   << Partitions);
      if (Partitions.getSize() < 2)
        return fail("CantIsolateUnsafeDeps",
                    "cannot isolate unsafe dependencies");
    }

    // Don't distribute the loop if we need too many SCEV run-time checks.
    const SCEVUnionPredicate &Pred = LAI->getPSE().getUnionPredicate();
    if (Pred.getComplexity() > (IsForced.getValueOr(false)
                                    ? PragmaDistributeSCEVCheckThreshold
                                    : DistributeSCEVCheckThreshold))
      return fail("TooManySCEVRuntimeChecks",
                  "too many SCEV run-time checks needed.\n");

    DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
    // We're done forming the partitions; set up the reverse mapping from
    // instructions to partitions.
    Partitions.setupPartitionIdOnInstructions();

    // To keep things simple, have an empty preheader before we version or
    // clone the loop. (Also split if this has no predecessor, i.e. entry,
    // because we rely on PH having a predecessor.)
    if (!PH->getSinglePredecessor() || &*PH->begin() != PH->getTerminator())
      SplitBlock(PH, PH->getTerminator(), DT, LI);

    // If we need run-time checks, version the loop now.
    auto PtrToPartition = Partitions.computePartitionSetForPointers(*LAI);
    const auto *RtPtrChecking = LAI->getRuntimePointerChecking();
    const auto &AllChecks = RtPtrChecking->getChecks();
    auto Checks = includeOnlyCrossPartitionChecks(AllChecks, PtrToPartition,
                                                  RtPtrChecking);

    if (!Pred.isAlwaysTrue() || !Checks.empty()) {
      DEBUG(dbgs() << "\nPointers:\n");
      DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks));
      LoopVersioning LVer(*LAI, L, LI, DT, SE, false);
      LVer.setAliasChecks(std::move(Checks));
      LVer.setSCEVChecks(LAI->getPSE().getUnionPredicate());
      LVer.versionLoop(DefsUsedOutside);
      LVer.annotateLoopWithNoAlias();
    }

    // Create identical copies of the original loop for each partition and hook
    // them up sequentially.
    Partitions.cloneLoops();

    // Now, remove the instructions from each loop that don't belong to that
    // partition.
    Partitions.removeUnusedInsts();
    DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
    DEBUG(Partitions.printBlocks());

    if (LDistVerify) {
      LI->verify(*DT);
      DT->verifyDomTree();
    }

    ++NumLoopsDistributed;
    // Report the success.
    ORE->emit(OptimizationRemark(LDIST_NAME, "Distribute", L->getStartLoc(),
                                 L->getHeader())
              << "distributed loop");
    return true;
  }

  /// \brief Provide diagnostics, then \return false.
  bool fail(StringRef RemarkName, StringRef Message) {
    LLVMContext &Ctx = F->getContext();
    bool Forced = isForced().getValueOr(false);

    DEBUG(dbgs() << "Skipping; " << Message << "\n");

    // With Rpass-missed report that distribution failed.
    ORE->emit(
        OptimizationRemarkMissed(LDIST_NAME, "NotDistributed", L->getStartLoc(),
                                 L->getHeader())
        << "loop not distributed: use -Rpass-analysis=loop-distribute for more "
           "info");

    // With Rpass-analysis report why. This is on by default if distribution
    // was requested explicitly.
    ORE->emit(OptimizationRemarkAnalysis(
                  Forced ? OptimizationRemarkAnalysis::AlwaysPrint : LDIST_NAME,
                  RemarkName, L->getStartLoc(), L->getHeader())
              << "loop not distributed: " << Message);

    // Also issue a warning if distribution was requested explicitly but it
    // failed.
    if (Forced)
      Ctx.diagnose(DiagnosticInfoOptimizationFailure(
          *F, L->getStartLoc(), "loop not distributed: failed "
                                "explicitly specified loop distribution"));

    return false;
  }

  /// \brief Returns whether distribution was forced to be enabled/disabled for
  /// the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  const Optional<bool> &isForced() const { return IsForced; }

private:
  /// \brief Filter out checks between pointers from the same partition.
  ///
  /// \p PtrToPartition contains the partition number for pointers. Partition
  /// number -1 means that the pointer is used in multiple partitions. In this
  /// case we can't safely omit the check.
  SmallVector<RuntimePointerChecking::PointerCheck, 4>
  includeOnlyCrossPartitionChecks(
      const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &AllChecks,
      const SmallVectorImpl<int> &PtrToPartition,
      const RuntimePointerChecking *RtPtrChecking) {
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks;

    std::copy_if(AllChecks.begin(), AllChecks.end(), std::back_inserter(Checks),
                 [&](const RuntimePointerChecking::PointerCheck &Check) {
                   for (unsigned PtrIdx1 : Check.first->Members)
                     for (unsigned PtrIdx2 : Check.second->Members)
                       // Only include this check if there is a pair of pointers
                       // that require checking and the pointers fall into
                       // separate partitions.
                       //
                       // (Note that we already know at this point that the two
                       // pointer groups need checking but it doesn't follow
                       // that each pair of pointers within the two groups needs
                       // checking as well.
                       //
                       // In other words we don't want to include a check just
                       // because there is a pair of pointers between the two
                       // pointer groups that require checks and a different
                       // pair whose pointers fall into different partitions.)
                       if (RtPtrChecking->needsChecking(PtrIdx1, PtrIdx2) &&
                           !RuntimePointerChecking::arePointersInSamePartition(
                               PtrToPartition, PtrIdx1, PtrIdx2))
                         return true;
                   return false;
                 });

    return Checks;
  }

  /// \brief Check whether the loop metadata is forcing distribution to be
  /// enabled/disabled.
  void setForced() {
    Optional<const MDOperand *> Value =
        findStringMetadataForLoop(L, "llvm.loop.distribute.enable");
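
    // As a sketch (illustrative IR, not produced by this pass), the metadata
    // queried here is typically attached to the loop's backedge branch:
    //
    //   br i1 %cond, label %header, label %exit, !llvm.loop !0
    //   ...
    //   !0 = distinct !{!0, !1}
    //   !1 = !{!"llvm.loop.distribute.enable", i1 true}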
    if (!Value)
      return;

    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    IsForced = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }

  Loop *L;
  Function *F;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo *LAI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  OptimizationRemarkEmitter *ORE;

  /// \brief Indicates whether distribution is forced to be enabled/disabled
  /// for the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  Optional<bool> IsForced;
};
Adam Nemet61399ac2016-04-27 00:31:03 +0000872
Adam Nemetb2593f72016-07-18 16:29:27 +0000873/// Shared implementation between new and old PMs.
874static bool runImpl(Function &F, LoopInfo *LI, DominatorTree *DT,
875 ScalarEvolution *SE, OptimizationRemarkEmitter *ORE,
876 std::function<const LoopAccessInfo &(Loop &)> &GetLAA,
877 bool ProcessAllLoops) {
878 // Build up a worklist of inner-loops to vectorize. This is necessary as the
879 // act of distributing a loop creates new loops and can invalidate iterators
880 // across the loops.
881 SmallVector<Loop *, 8> Worklist;
882
883 for (Loop *TopLevelLoop : *LI)
884 for (Loop *L : depth_first(TopLevelLoop))
885 // We only handle inner-most loops.
886 if (L->empty())
887 Worklist.push_back(L);
888
889 // Now walk the identified inner loops.
890 bool Changed = false;
891 for (Loop *L : Worklist) {
892 LoopDistributeForLoop LDL(L, &F, LI, DT, SE, ORE);
893
894 // If distribution was forced for the specific loop to be
895 // enabled/disabled, follow that. Otherwise use the global flag.
896 if (LDL.isForced().getValueOr(ProcessAllLoops))
897 Changed |= LDL.processLoop(GetLAA);
898 }
899
900 // Process each loop nest in the function.
901 return Changed;
902}

/// \brief The pass class.
class LoopDistributeLegacy : public FunctionPass {
public:
  /// \p ProcessAllLoopsByDefault specifies whether loop distribution should be
  /// performed by default. Pass -enable-loop-distribute={0,1} overrides this
  /// default. We use this to keep LoopDistribution off by default when invoked
  /// from the optimization pipeline but on when invoked explicitly from opt.
  LoopDistributeLegacy(bool ProcessAllLoopsByDefault = true)
      : FunctionPass(ID), ProcessAllLoops(ProcessAllLoopsByDefault) {
    // The default is set by the caller.
    if (EnableLoopDistribute.getNumOccurrences() > 0)
      ProcessAllLoops = EnableLoopDistribute;
    initializeLoopDistributeLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return runImpl(F, LI, DT, SE, ORE, GetLAA, ProcessAllLoops);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }

  static char ID;

private:
  /// \brief Whether distribution should be on in this function. The per-loop
  /// pragma can override this.
  bool ProcessAllLoops;
};
} // anonymous namespace

PreservedAnalyses LoopDistributePass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  // FIXME: This does not currently match the behavior from the old PM.
  // ProcessAllLoops with the old PM defaults to true when invoked from opt and
  // false when invoked from the optimization pipeline.
  bool ProcessAllLoops = false;
  if (EnableLoopDistribute.getNumOccurrences() > 0)
    ProcessAllLoops = EnableLoopDistribute;

  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    return LAM.getResult<LoopAccessAnalysis>(L);
  };

  bool Changed = runImpl(F, &LI, &DT, &SE, &ORE, GetLAA, ProcessAllLoops);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  return PA;
}
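
// As a usage sketch (illustrative; the exact pipeline spelling depends on the
// opt version), the pass can be exercised directly from opt using the
// LDIST_NAME and -enable-loop-distribute options defined above:
//
//   opt -loop-distribute -enable-loop-distribute -S input.ll   ; legacy PM
//   opt -passes=loop-distribute -S input.ll                    ; new PM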

char LoopDistributeLegacy::ID;
static const char ldist_name[] = "Loop Distribution";

INITIALIZE_PASS_BEGIN(LoopDistributeLegacy, LDIST_NAME, ldist_name, false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopDistributeLegacy, LDIST_NAME, ldist_name, false, false)

namespace llvm {
FunctionPass *createLoopDistributePass(bool ProcessAllLoopsByDefault) {
  return new LoopDistributeLegacy(ProcessAllLoopsByDefault);
}
}