//===- CGSCCPassManager.cpp - Managing & running CGSCC passes -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/CallSite.h"

using namespace llvm;

namespace llvm {

// Explicit instantiations for the core proxy templates.
template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
                           LazyCallGraph &, CGSCCUpdateResult &>;
template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
                                         LazyCallGraph::SCC>;
template class InnerAnalysisManagerProxy<FunctionAnalysisManager,
                                         LazyCallGraph::SCC>;
template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
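
// Illustrative sketch (not part of upstream): before a CGSCC pipeline can run,
// these proxies are typically registered so that each analysis manager can
// reach the others. The proxy names below are the typedefs declared in
// CGSCCPassManager.h; the manager variables are assumed to be set up by the
// caller (much as PassBuilder does when it cross-registers proxies):
//
//   ModuleAnalysisManager MAM(/*DebugLogging*/ false);
//   CGSCCAnalysisManager CGAM(/*DebugLogging*/ false);
//   FunctionAnalysisManager FAM(/*DebugLogging*/ false);
//   MAM.registerPass([&] { return CGSCCAnalysisManagerModuleProxy(CGAM); });
//   CGAM.registerPass([&] { return ModuleAnalysisManagerCGSCCProxy(MAM); });
//   CGAM.registerPass([&] { return FunctionAnalysisManagerCGSCCProxy(FAM); });
//   FAM.registerPass([&] { return CGSCCAnalysisManagerFunctionProxy(CGAM); });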

/// Explicitly specialize the pass manager run method to handle call graph
/// updates.
template <>
PreservedAnalyses
PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
            CGSCCUpdateResult &>::run(LazyCallGraph::SCC &InitialC,
                                      CGSCCAnalysisManager &AM,
                                      LazyCallGraph &G, CGSCCUpdateResult &UR) {
  PreservedAnalyses PA = PreservedAnalyses::all();

  if (DebugLogging)
    dbgs() << "Starting CGSCC pass manager run.\n";

  // The SCC may be refined while we are running passes over it, so set up
  // a pointer that we can update.
  LazyCallGraph::SCC *C = &InitialC;

  for (auto &Pass : Passes) {
    if (DebugLogging)
      dbgs() << "Running pass: " << Pass->name() << " on " << *C << "\n";

    PreservedAnalyses PassPA = Pass->run(*C, AM, G, UR);

    // Update the SCC if necessary.
    C = UR.UpdatedC ? UR.UpdatedC : C;

    // Check that we didn't miss any update scenario.
    assert(!UR.InvalidatedSCCs.count(C) && "Processing an invalid SCC!");
    assert(C->begin() != C->end() && "Cannot have an empty SCC!");

    // Update the analysis manager as each pass runs and potentially
    // invalidates analyses.
    AM.invalidate(*C, PassPA);

    // Finally, we intersect the final preserved analyses to compute the
    // aggregate preserved set for this pass manager.
    PA.intersect(std::move(PassPA));

    // FIXME: Historically, the pass managers all called the LLVM context's
    // yield function here. We don't have a generic way to acquire the
    // context and it isn't yet clear what the right pattern is for yielding
    // in the new pass manager so it is currently omitted.
    // ...getContext().yield();
  }

  // Invalidation was handled after each pass in the above loop for the current
  // SCC. Therefore, the remaining analysis results in the AnalysisManager are
  // preserved. We mark this with a set so that we don't need to inspect each
  // one individually.
  PA.preserve<AllAnalysesOn<LazyCallGraph::SCC>>();

  if (DebugLogging)
    dbgs() << "Finished CGSCC pass manager run.\n";

  return PA;
}
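
// Illustrative sketch (not part of upstream): the specialization above is what
// runs when a CGSCC pipeline is adapted into a module pipeline. `MySCCPass` is
// a hypothetical pass, and `M`/`MAM` are a Module and ModuleAnalysisManager
// assumed to be set up elsewhere; the adaptor is declared in
// CGSCCPassManager.h:
//
//   CGSCCPassManager CGPM(/*DebugLogging*/ false);
//   CGPM.addPass(MySCCPass());
//   ModulePassManager MPM(/*DebugLogging*/ false);
//   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
//   MPM.run(M, MAM);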

} // End llvm namespace

namespace {
/// Helper function to update both the \c CGSCCAnalysisManager \p AM and the \c
/// CGSCCPassManager's \c CGSCCUpdateResult \p UR based on a range of newly
/// added SCCs.
///
/// The range of new SCCs must be in postorder already. The SCC they were split
/// out of must be provided as \p C. The current node being mutated and
/// triggering updates must be passed as \p N.
///
/// This function returns the SCC containing \p N. This will be either \p C if
/// no new SCCs have been split out, or it will be the new SCC containing \p N.
template <typename SCCRangeT>
LazyCallGraph::SCC *
incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
                       LazyCallGraph::Node &N, LazyCallGraph::SCC *C,
                       CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
                       bool DebugLogging = false) {
  typedef LazyCallGraph::SCC SCC;

  if (NewSCCRange.begin() == NewSCCRange.end())
    return C;

  // Invalidate the analyses of the current SCC and add it to the worklist
  // since it has changed its shape.
  AM.invalidate(*C, PreservedAnalyses::none());
  UR.CWorklist.insert(C);
  if (DebugLogging)
    dbgs() << "Enqueuing the existing SCC in the worklist:" << *C << "\n";

  SCC *OldC = C;
  (void)OldC;

  // Update the current SCC. Note that if we have new SCCs, this must actually
  // change the SCC.
  assert(C != &*NewSCCRange.begin() &&
         "Cannot insert new SCCs without changing current SCC!");
  C = &*NewSCCRange.begin();
  assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

  for (SCC &NewC :
       reverse(make_range(std::next(NewSCCRange.begin()), NewSCCRange.end()))) {
    assert(C != &NewC && "No need to re-visit the current SCC!");
    assert(OldC != &NewC && "Already handled the original SCC!");
    UR.CWorklist.insert(&NewC);
    if (DebugLogging)
      dbgs() << "Enqueuing a newly formed SCC:" << NewC << "\n";
  }
  return C;
}
}
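
// Illustrative note (not part of upstream): the helper above is driven by the
// edge-switching APIs on RefSCC, which return the range of SCCs created by a
// split. The call pattern used in the update function below is, e.g.:
//
//   C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G, N,
//                              C, AM, UR, DebugLogging);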

LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
    LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, bool DebugLogging) {
  typedef LazyCallGraph::Node Node;
  typedef LazyCallGraph::Edge Edge;
  typedef LazyCallGraph::SCC SCC;
  typedef LazyCallGraph::RefSCC RefSCC;

  RefSCC &InitialRC = InitialC.getOuterRefSCC();
  SCC *C = &InitialC;
  RefSCC *RC = &InitialRC;
  Function &F = N.getFunction();

  // Walk the function body and build up the set of retained, promoted, and
  // demoted edges.
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  SmallPtrSet<Function *, 16> RetainedEdges;
  SmallSetVector<Function *, 4> PromotedRefTargets;
  SmallSetVector<Function *, 4> DemotedCallTargets;
  // First walk the function and handle all called functions. We do this first
  // because if there is a call edge to a function, any ref edges to that same
  // function are irrelevant, so the reference walk below can skip it.
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      if (auto CS = CallSite(&I))
        if (Function *Callee = CS.getCalledFunction())
          if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
            const Edge *E = N.lookup(*Callee);
            // FIXME: We should really handle adding new calls. While it will
            // make downstream usage more complex, there is no fundamental
            // limitation and it will allow passes within the CGSCC to be a bit
            // more flexible in what transforms they can do. Until then, we
            // verify that new calls haven't been introduced.
            assert(E && "No function transformations should introduce *new* "
                        "call edges! Any new calls should be modeled as "
                        "promoted existing ref edges!");
            RetainedEdges.insert(Callee);
            if (!E->isCall())
              PromotedRefTargets.insert(Callee);
          }
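
  // For example (illustrative, not tied to any particular pass): if a
  // transformation turned an indirect call through a function pointer into a
  // direct call, the graph previously had only a ref edge from N to that
  // callee (e.g., because its address was taken). That existing ref edge lands
  // in PromotedRefTargets here and is switched to a call edge further below.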

  // Now walk all references.
  for (BasicBlock &BB : F)
    for (Instruction &I : BB) {
      for (Value *Op : I.operand_values())
        if (Constant *C = dyn_cast<Constant>(Op))
          if (Visited.insert(C).second)
            Worklist.push_back(C);

      LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &Referee) {
        // Skip declarations.
        if (Referee.isDeclaration())
          return;

        const Edge *E = N.lookup(Referee);
        // FIXME: Similarly to new calls, we also currently preclude
        // introducing new references. See above for details.
        assert(E && "No function transformations should introduce *new* ref "
                    "edges! Any new ref edges would require IPO which "
                    "function passes aren't allowed to do!");
        RetainedEdges.insert(&Referee);
        if (E->isCall())
          DemotedCallTargets.insert(&Referee);
      });
    }
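
  // For example (illustrative): if a direct call to a function was deleted but
  // its address is still taken somewhere in F, only a reference remains in the
  // IR. The existing call edge lands in DemotedCallTargets here and is
  // switched to a ref edge further below.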

  // First remove all of the edges that are no longer present in this function.
  // We have to build a list of dead targets first and then remove them, as
  // removing edges while iterating over them would invalidate the edge data
  // structures we are walking.
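  //
  // For example (illustrative): a function that F no longer calls or
  // references at all will not be in RetainedEdges, so its edge is collected
  // here. A dead internal call edge is first switched to a ref edge
  // (potentially splitting SCCs) and then the ref edge itself is removed
  // (potentially splitting RefSCCs) in the loop below.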
  SmallVector<PointerIntPair<Node *, 1, Edge::Kind>, 4> DeadTargets;
  for (Edge &E : N)
    if (!RetainedEdges.count(&E.getFunction()))
      DeadTargets.push_back({E.getNode(), E.getKind()});
  for (auto DeadTarget : DeadTargets) {
    Node &TargetN = *DeadTarget.getPointer();
    bool IsCall = DeadTarget.getInt() == Edge::Call;
    SCC &TargetC = *G.lookupSCC(TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    if (&TargetRC != RC) {
      RC->removeOutgoingEdge(N, TargetN);
      if (DebugLogging)
        dbgs() << "Deleting outgoing edge from '" << N << "' to '" << TargetN
               << "'\n";
      continue;
    }
    if (DebugLogging)
      dbgs() << "Deleting internal " << (IsCall ? "call" : "ref")
             << " edge from '" << N << "' to '" << TargetN << "'\n";

    if (IsCall)
      C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G, N,
                                 C, AM, UR, DebugLogging);

    auto NewRefSCCs = RC->removeInternalRefEdge(N, TargetN);
    if (!NewRefSCCs.empty()) {
      // Note that we don't bother to invalidate analyses as ref-edge
      // connectivity is not really observable in any way and is intended
      // exclusively to be used for ordering of transforms rather than for
      // analysis conclusions.

      // The RC worklist is in reverse postorder, so we first enqueue the
      // current RefSCC as it will remain the parent of all of the split
      // RefSCCs. Then we enqueue the new RefSCCs in RPO, except for the one
      // which contains the source node, as that is the "bottom" we will
      // continue processing in the bottom-up walk.
      UR.RCWorklist.insert(RC);
      if (DebugLogging)
        dbgs() << "Enqueuing the existing RefSCC in the update worklist: "
               << *RC << "\n";
      // Update the RC to the "bottom".
      assert(G.lookupSCC(N) == C && "Changed the SCC when splitting RefSCCs!");
      RC = &C->getOuterRefSCC();
      assert(G.lookupRefSCC(N) == RC && "Failed to update current RefSCC!");
      for (RefSCC *NewRC : reverse(NewRefSCCs))
        if (NewRC != RC) {
          UR.RCWorklist.insert(NewRC);
          if (DebugLogging)
            dbgs() << "Enqueuing a new RefSCC in the update worklist: "
                   << *NewRC << "\n";
        }
    }
  }

  // Next demote all the call edges that are now ref edges. Doing the demotions
  // before the promotions keeps the SCCs small, which should minimize the work
  // below, and avoids first forming call cycles that these demotions would
  // immediately break apart again.
  for (Function *RefTarget : DemotedCallTargets) {
    Node &TargetN = *G.lookup(*RefTarget);
    SCC &TargetC = *G.lookupSCC(TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToRef(N, TargetN);
      if (DebugLogging)
        dbgs() << "Switch outgoing call edge to a ref edge from '" << N
               << "' to '" << TargetN << "'\n";
      continue;
    }

    // Otherwise we are switching an internal call edge to a ref edge. This
    // may split up some SCCs.
    C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G, N, C,
                               AM, UR, DebugLogging);
  }

  // Now promote ref edges into call edges.
  for (Function *CallTarget : PromotedRefTargets) {
    Node &TargetN = *G.lookup(*CallTarget);
    SCC &TargetC = *G.lookupSCC(TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToCall(N, TargetN);
      if (DebugLogging)
        dbgs() << "Switch outgoing ref edge to a call edge from '" << N
               << "' to '" << TargetN << "'\n";
      continue;
    }
    if (DebugLogging)
      dbgs() << "Switch an internal ref edge to a call edge from '" << N
             << "' to '" << TargetN << "'\n";

    // Otherwise we are switching an internal ref edge to a call edge. This
    // may merge away some SCCs, and we add those to the UpdateResult. We also
    // need to make sure to update the worklist in the event SCCs have moved
    // before the current one in the post-order sequence.
    auto InitialSCCIndex = RC->find(*C) - RC->begin();
    auto InvalidatedSCCs = RC->switchInternalEdgeToCall(N, TargetN);
    if (!InvalidatedSCCs.empty()) {
      C = &TargetC;
      assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

      // Any analyses cached for this SCC are no longer precise as the shape
      // has changed by introducing this cycle.
      AM.invalidate(*C, PreservedAnalyses::none());

      for (SCC *InvalidatedC : InvalidatedSCCs) {
        assert(InvalidatedC != C && "Cannot invalidate the current SCC!");
        UR.InvalidatedSCCs.insert(InvalidatedC);

        // Also clear any cached analyses for the SCCs that are dead. This
        // isn't really necessary for correctness but can release memory.
        AM.clear(*InvalidatedC);
      }
    }
    auto NewSCCIndex = RC->find(*C) - RC->begin();
    if (InitialSCCIndex < NewSCCIndex) {
      // Put our current SCC back onto the worklist as we'll visit other SCCs
      // that are now definitively ordered prior to the current one in the
      // post-order sequence, and may end up observing more precise context to
      // optimize the current SCC.
      UR.CWorklist.insert(C);
      if (DebugLogging)
        dbgs() << "Enqueuing the existing SCC in the worklist: " << *C << "\n";
      // Enqueue in reverse order as we pop off the back of the worklist.
      for (SCC &MovedC : reverse(make_range(RC->begin() + InitialSCCIndex,
                                            RC->begin() + NewSCCIndex))) {
        UR.CWorklist.insert(&MovedC);
        if (DebugLogging)
          dbgs() << "Enqueuing a newly earlier in post-order SCC: " << MovedC
                 << "\n";
      }
    }
  }

  assert(!UR.InvalidatedSCCs.count(C) && "Invalidated the current SCC!");
  assert(!UR.InvalidatedRefSCCs.count(RC) && "Invalidated the current RefSCC!");
  assert(&C->getOuterRefSCC() == RC && "Current SCC not in current RefSCC!");

  // Record the current RefSCC and SCC for higher layers of the CGSCC pass
  // manager now that all the updates have been applied.
  if (RC != &InitialRC)
    UR.UpdatedRC = RC;
  if (C != &InitialC)
    UR.UpdatedC = C;

  return *C;
}