//===- Parallelize.cpp - Auto parallelization using DS Graphs ---*- C++ -*-===//
//
// This file implements a pass that automatically parallelizes a program,
// using the Cilk multi-threaded runtime system to execute parallel code.
//
// The pass uses the Program Dependence Graph (class PDGIterator) to
// identify parallelizable function calls, i.e., calls whose instances
// can be executed in parallel with instances of other function calls.
// (In the future, this should also execute different instances of the same
// function call in parallel, but that requires parallelizing across
// loop iterations.)
//
// The output of the pass is LLVM code with:
// (1) all parallelizable functions renamed to flag them as parallelizable;
// (2) calls to a sync() function introduced at synchronization points.
// The CWriter recognizes these functions and inserts the appropriate Cilk
// keywords when writing out C code.  This C code must be compiled with cilk2c.
//
// Current algorithmic limitations:
// -- no array dependence analysis
// -- no parallelization for function calls in different loop iterations
//    (except in unlikely trivial cases)
//
// Limitations of using Cilk:
// -- No parallelism within a function body, e.g., in a loop;
// -- Simplistic synchronization model requiring all parallel threads
//    created within a function to block at a sync();
// -- Excessive overhead at "spawned" function calls, which has no benefit
//    once all threads are busy (especially common when the degree of
//    parallelism is low).
//===----------------------------------------------------------------------===//
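
// An illustrative sketch of the end-to-end effect (the exact suffix and sync
// name come from CilkSuffix and DummySyncFuncName in Cilkifier.h; the names
// f and g here are hypothetical).  Given a function containing two
// independent calls:
//
//     int x = f(a);
//     int y = g(b);
//     return x + y;
//
// f and g are renamed with CilkSuffix and a call to the dummy sync() is
// inserted before the first use of the spawned values, so the CWriter can
// emit Cilk code of the form:
//
//     int x = spawn f(a);
//     int y = spawn g(b);
//     sync;
//     return x + y;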


#include "llvm/Transforms/Parallelize.h"
#include "llvm/Transforms/Utils/DemoteRegToStack.h"
#include "llvm/Analysis/PgmDependenceGraph.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/DataStructure.h"
#include "llvm/Analysis/DSGraph.h"
#include "llvm/Module.h"
#include "llvm/Function.h"
#include "llvm/iOther.h"
#include "llvm/iPHINode.h"
#include "llvm/iTerminators.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/Cilkifier.h"
#include "Support/Statistic.h"
#include "Support/STLExtras.h"
#include "Support/hash_set"
#include "Support/hash_map"
#include <vector>
#include <stack>
#include <functional>
#include <algorithm>


#if 0
void AddToDomSet(std::vector<BasicBlock*>& domSet, BasicBlock* bb,
                 const DominatorTree& domTree)
{
  DominatorTreeBase::Node* bbNode = domTree.getNode(bb);
  const std::vector<DominatorTreeBase::Node*>& domKids = bbNode->getChildren();
  for (unsigned i = 0; i < domKids.size(); ++i)
    { // record each dominated block, then recurse into its subtree
      domSet.push_back(domKids[i]->getNode());
      AddToDomSet(domSet, domKids[i]->getNode(), domTree);
    }
}

bool CheckDominance(Function& func,
                    const CallInst& callInst1,
                    const CallInst& callInst2)
{
  if (&callInst1 == &callInst2)         // makes sense if this is in a loop but
    return false;                       // we're not handling loops yet

  // Check first if one call dominates the other
  // (use pointers so the two calls can be reordered).
  const CallInst* CI1 = &callInst1;
  const CallInst* CI2 = &callInst2;
  DominatorSet& domSet = getAnalysis<DominatorSet>(func);
  if (domSet.dominates(*CI2, *CI1))
    std::swap(CI1, CI2);                // ensure CI1 dominates CI2
  else if (! domSet.dominates(*CI1, *CI2))
    return false;                       // neither dominates the other

  // The two calls are parallelizable only if they are independent.
  return AreIndependent(func, *CI1, *CI2);
}

#endif


//----------------------------------------------------------------------------
// class Cilkifier
//
// Code generation pass that transforms code to identify where Cilk keywords
// should be inserted.  This relies on dis -c to print out the keywords.
//----------------------------------------------------------------------------


class Cilkifier: public InstVisitor<Cilkifier>
{
  Function* DummySyncFunc;

  // Data used when transforming each function.
  hash_set<const Instruction*> stmtsVisited;   // Flags for recursive DFS
  hash_map<const CallInst*, hash_set<CallInst*> > spawnToSyncsMap;

  // Input data for the transformation.
  const hash_set<Function*>* cilkFunctions;    // Set of parallel functions
  PgmDependenceGraph* depGraph;

  void DFSVisitInstr(Instruction* I,
                     Instruction* root,
                     hash_set<const Instruction*>& depsOfRoot);

public:
  /*ctor*/ Cilkifier(Module& M);

  // Transform a single function, including its name, its call sites, and
  // its syncs.
  //
  void TransformFunc(Function* F,
                     const hash_set<Function*>& _cilkFunctions,
                     PgmDependenceGraph& _depGraph);

  // The visitor function that does most of the hard work, via DFSVisitInstr.
  //
  void visitCallInst(CallInst& CI);
};
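
// A minimal usage sketch, mirroring what Parallelize::run() does below
// (M, F, cilkFns, and PDG are hypothetical locals):
//
//     Cilkifier cilkifier(M);                    // creates the dummy sync()
//     cilkifier.TransformFunc(F, cilkFns, PDG);  // renames F, inserts syncs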


Cilkifier::Cilkifier(Module& M)
{
  // Create the dummy Sync function and add it to the Module
  DummySyncFunc = new Function(FunctionType::get(Type::VoidTy,
                                                 std::vector<const Type*>(),
                                                 /*isVararg*/ false),
                               GlobalValue::ExternalLinkage, DummySyncFuncName,
                               &M);
}

void Cilkifier::TransformFunc(Function* F,
                              const hash_set<Function*>& _cilkFunctions,
                              PgmDependenceGraph& _depGraph)
{
  // Memoize the information for this function
  cilkFunctions = &_cilkFunctions;
  depGraph = &_depGraph;

  // Add the marker suffix to the Function name.
  // This should automatically mark all calls to the function also!
  F->setName(F->getName() + CilkSuffix);

  // Insert sync operations for each separate spawn
  visit(*F);

  // TODO: Now traverse the CFG in reverse postorder and eliminate redundant
  // syncs, i.e., two consecutive syncs on a straight-line path with no
  // intervening spawn.
}


void Cilkifier::DFSVisitInstr(Instruction* I,
                              Instruction* root,
                              hash_set<const Instruction*>& depsOfRoot)
{
  assert(stmtsVisited.find(I) == stmtsVisited.end());
  stmtsVisited.insert(I);

  // If there is a dependence from root to I, insert a sync and return.
  if (depsOfRoot.find(I) != depsOfRoot.end())
    { // Insert a sync before I and stop searching along this path.
      // If I is a Phi instruction, the dependence can only be an SSA dep.,
      // and we need to insert the sync in the predecessor on the appropriate
      // incoming edge!
      CallInst* syncI = 0;
      if (PHINode* phiI = dyn_cast<PHINode>(I))
        { // check all operands of the Phi and insert before each one
          for (unsigned i = 0, N = phiI->getNumIncomingValues(); i < N; ++i)
            if (phiI->getIncomingValue(i) == root)
              syncI = new CallInst(DummySyncFunc, std::vector<Value*>(), "",
                                   phiI->getIncomingBlock(i)->getTerminator());
        }
      else
        syncI = new CallInst(DummySyncFunc, std::vector<Value*>(), "", I);

      // Remember the sync for each spawn to eliminate redundant ones later
      spawnToSyncsMap[cast<CallInst>(root)].insert(syncI);

      return;
    }

  // Else visit unvisited successors.
  if (BranchInst* brI = dyn_cast<BranchInst>(I))
    { // visit first instruction in each successor BB
      for (unsigned i = 0, N = brI->getNumSuccessors(); i < N; ++i)
        if (stmtsVisited.find(&brI->getSuccessor(i)->front())
            == stmtsVisited.end())
          DFSVisitInstr(&brI->getSuccessor(i)->front(), root, depsOfRoot);
    }
  else if (Instruction* nextI = I->getNext())
    if (stmtsVisited.find(nextI) == stmtsVisited.end())
      DFSVisitInstr(nextI, root, depsOfRoot);
}
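
// An illustration of the Phi special case above (hypothetical names): if the
// spawned call %c (the root) reaches a phi along the edge from %bb1,
//
//     %x = phi int [ %c, %bb1 ], [ 0, %bb2 ]
//
// the sync is inserted before the terminator of %bb1 rather than before the
// phi itself, so only the path that actually carries the spawned value is
// forced to wait.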


void Cilkifier::visitCallInst(CallInst& CI)
{
  assert(CI.getCalledFunction() != 0 && "Only direct calls can be spawned.");
  if (cilkFunctions->find(CI.getCalledFunction()) == cilkFunctions->end())
    return;                             // not a spawn

  // Find all the outgoing memory dependences.
  hash_set<const Instruction*> depsOfRoot;
  for (PgmDependenceGraph::iterator DI =
         depGraph->outDepBegin(CI, MemoryDeps); ! DI.fini(); ++DI)
    depsOfRoot.insert(&DI->getSink()->getInstr());

  // Now find all outgoing SSA dependences to the eventual non-Phi users of
  // the call value: direct users that are not phis, plus, for any user that
  // is a Phi, the non-Phi users of that Phi, and so on recursively.
  std::stack<const PHINode*> phiUsers;
  hash_set<const PHINode*> phisSeen;    // ensures we don't visit a phi twice
  for (Value::use_iterator UI=CI.use_begin(), UE=CI.use_end(); UI != UE; ++UI)
    if (const PHINode* phiUser = dyn_cast<PHINode>(*UI))
      {
        if (phisSeen.find(phiUser) == phisSeen.end())
          {
            phiUsers.push(phiUser);
            phisSeen.insert(phiUser);
          }
      }
    else
      depsOfRoot.insert(cast<Instruction>(*UI));

  // Now we've found the non-Phi users and immediate phi users.
  // Recursively walk the phi users and add their non-phi users.
  // (Pop each phi *before* processing it, so phis pushed during the walk
  // are not popped unprocessed.)
  while (! phiUsers.empty())
    {
      const PHINode* phiUser = phiUsers.top();
      phiUsers.pop();
      for (Value::use_const_iterator UI=phiUser->use_begin(),
             UE=phiUser->use_end(); UI != UE; ++UI)
        if (const PHINode* pn = dyn_cast<PHINode>(*UI))
          {
            if (phisSeen.find(pn) == phisSeen.end())
              {
                phiUsers.push(pn);
                phisSeen.insert(pn);
              }
          }
        else
          depsOfRoot.insert(cast<Instruction>(*UI));
    }

  // Walk paths of the CFG starting at the call instruction and insert
  // one sync before the first dependence on each path, if any.
  if (! depsOfRoot.empty())
    {
      stmtsVisited.clear();             // start a new DFS for this CallInst
      assert(CI.getNext() && "Call instruction cannot be a terminator!");
      DFSVisitInstr(CI.getNext(), &CI, depsOfRoot);
    }

  // Finally, eliminate all uses of the SSA return value of the CallInst:
  // if the call instruction returns a value, demote the return-value
  // register to a stack slot.
  if (CI.getType() != Type::VoidTy)
    DemoteRegToStack(CI);
}
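
// A sketch of what DemoteRegToStack does here, on a hypothetical spawned
// call (LLVM 1.x syntax, names invented for illustration):
//
//     %v = call int %f()
//     %u = add int %v, 1
//
// becomes, in effect,
//
//     %slot = alloca int             ; stack slot for the call's value
//     %v = call int %f()
//     store int %v, int* %slot
//     ...
//     %v.reload = load int* %slot    ; reload at each former use of %v
//     %u = add int %v.reload, 1
//
// so the spawned value lives in memory rather than in an SSA register that
// would otherwise be live across the inserted sync.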


//----------------------------------------------------------------------------
// class FindParallelCalls
//
// Find all CallInst instructions that have at least one other CallInst
// that is independent.  These are the instructions that can produce
// useful parallelism.
//----------------------------------------------------------------------------

class FindParallelCalls : public InstVisitor<FindParallelCalls> {
  typedef hash_set<CallInst*> DependentsSet;
  typedef DependentsSet::iterator Dependents_iterator;
  typedef DependentsSet::const_iterator Dependents_const_iterator;

  PgmDependenceGraph& depGraph;          // dependence graph for the function
  hash_set<Instruction*> stmtsVisited;   // flags for DFS walk of depGraph
  hash_map<CallInst*, bool> completed;   // flags marking if a CI is done
  hash_map<CallInst*, DependentsSet> dependents; // dependent CIs for each CI

  void VisitOutEdges(Instruction* I,
                     CallInst* root,
                     DependentsSet& depsOfRoot);

  FindParallelCalls(const FindParallelCalls&); // DO NOT IMPLEMENT
  void operator=(const FindParallelCalls&);    // DO NOT IMPLEMENT
public:
  std::vector<CallInst*> parallelCalls;

  /*ctor*/ FindParallelCalls(Function& F, PgmDependenceGraph& DG);
  void visitCallInst(CallInst& CI);
};


FindParallelCalls::FindParallelCalls(Function& F,
                                     PgmDependenceGraph& DG)
  : depGraph(DG)
{
  // Find all CallInsts reachable from each CallInst using a recursive DFS
  visit(F);

  // Now we've found all CallInsts reachable from each CallInst.
  // Find those CallInsts that are parallel with at least one other CallInst
  // by counting total inEdges and outEdges.
  //
  unsigned long totalNumCalls = completed.size();

  if (totalNumCalls == 1)
    { // Check first for the special case of a single call instruction not
      // in any loop.  It is not parallel, even if it has no dependences
      // (this is why it is a special case).
      //
      // FIXME:
      // THIS CASE IS NOT HANDLED RIGHT NOW, I.E., THERE IS NO
      // PARALLELISM FOR CALLS IN DIFFERENT ITERATIONS OF A LOOP.
      //
      return;
    }

  hash_map<CallInst*, unsigned long> numDeps;
  for (hash_map<CallInst*, DependentsSet>::iterator II = dependents.begin(),
         IE = dependents.end(); II != IE; ++II)
    {
      CallInst* fromCI = II->first;
      numDeps[fromCI] += II->second.size();
      for (Dependents_iterator DI = II->second.begin(), DE = II->second.end();
           DI != DE; ++DI)
        numDeps[*DI]++;                 // *DI can be reached from II->first
    }

  for (hash_map<CallInst*, DependentsSet>::iterator
         II = dependents.begin(), IE = dependents.end(); II != IE; ++II)
    // FIXME: Remove "- 1" when considering parallelism in loops
    if (numDeps[II->first] < totalNumCalls - 1)
      parallelCalls.push_back(II->first);
}
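
// A worked illustration of the counting above, with hypothetical calls
// A, B, C: if B depends on A and C is independent of both, then
// dependents[A] = {B}, so numDeps[A] = 1, numDeps[B] = 1, numDeps[C] = 0.
// With totalNumCalls = 3, each call satisfies numDeps < totalNumCalls - 1 = 2,
// so all three are recorded in parallelCalls (each is independent of at least
// one other call).  If instead A and B were the only two calls and mutually
// dependent, numDeps[A] = numDeps[B] = 2, which is not < 1, so neither
// qualifies.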


void FindParallelCalls::VisitOutEdges(Instruction* I,
                                      CallInst* root,
                                      DependentsSet& depsOfRoot)
{
  assert(stmtsVisited.find(I) == stmtsVisited.end() && "Stmt visited twice?");
  stmtsVisited.insert(I);

  if (CallInst* CI = dyn_cast<CallInst>(I))
    // FIXME: Ignoring parallelism in a loop.  Here we're actually *ignoring*
    // a self-dependence in order to get the count comparison right above.
    // When we include loop parallelism, self-dependences should be included.
    //
    if (CI != root)
      { // CallInst root has a path to CallInst I and any calls reachable
        // from I.
        depsOfRoot.insert(CI);
        if (completed[CI])
          { // We have already visited I so we know all nodes it can reach!
            DependentsSet& depsOfI = dependents[CI];
            depsOfRoot.insert(depsOfI.begin(), depsOfI.end());
            return;
          }
      }

  // If we reach here, we need to visit all children of I
  for (PgmDependenceGraph::iterator DI = depGraph.outDepBegin(*I);
       ! DI.fini(); ++DI)
    {
      Instruction* sink = &DI->getSink()->getInstr();
      if (stmtsVisited.find(sink) == stmtsVisited.end())
        VisitOutEdges(sink, root, depsOfRoot);
    }
}


void FindParallelCalls::visitCallInst(CallInst& CI)
{
  if (completed[&CI])
    return;
  stmtsVisited.clear();                 // clear flags to do a fresh DFS

  // Visit all children of CI using a recursive walk through the dep graph
  DependentsSet& depsOfRoot = dependents[&CI];
  for (PgmDependenceGraph::iterator DI = depGraph.outDepBegin(CI);
       ! DI.fini(); ++DI)
    {
      Instruction* sink = &DI->getSink()->getInstr();
      if (stmtsVisited.find(sink) == stmtsVisited.end())
        VisitOutEdges(sink, &CI, depsOfRoot);
    }

  completed[&CI] = true;
}


//----------------------------------------------------------------------------
// class Parallelize
//
// (1) Find candidate parallel functions: any function F s.t.
//     there is a call C1 to the function F that is followed or preceded
//     by at least one other call C2 that is independent of this one
//     (i.e., there is no dependence path from C1 to C2 or from C2 to C1).
// (2) Label each such function F as a Cilk function.
// (3) Convert every call to F to a spawn.
// (4) For every function X, insert sync statements so that
//     every spawn is postdominated by a sync before any statement
//     with a data dependence to/from the call site for the spawn.
//
//----------------------------------------------------------------------------

namespace {
  class Parallelize: public Pass
  {
  public:
    /// Driver function to transform a program
    ///
    bool run(Module& M);

    /// getAnalysisUsage - Modifies the program extensively, so preserves
    /// nothing.  Uses the PgmDependenceGraph and the top-down DS graph
    /// (the latter only to find all functions called via an indirect call).
    ///
    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<TDDataStructures>();
      AU.addRequired<MemoryDepAnalysis>();  // force this not to be released,
      AU.addRequired<PgmDependenceGraph>(); // since PgmDependenceGraph uses it
    }
  };

  RegisterOpt<Parallelize> X("parallel", "Parallelize program using Cilk");
}
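
// Expected usage, as a sketch (exact tool names and flags come from the
// toolchain of this era and may differ): run "opt -parallel" over the
// bytecode, emit C with the Cilk keywords via "dis -c" (see the Cilkifier
// comment above), and compile the result with cilk2c.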


static Function* FindMain(Module& M)
{
  for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI)
    if (FI->getName() == std::string("main"))
      return FI;
  return NULL;
}


bool Parallelize::run(Module& M)
{
  hash_set<Function*> parallelFunctions;
  hash_set<Function*> safeParallelFunctions;
  hash_set<const GlobalValue*> indirectlyCalled;

  // If there is no main (i.e., for an incomplete program), we can do nothing.
  // If there is a main, it is marked as a parallel function below,
  // since Cilk requires this.
  //
  Function* mainFunc = FindMain(M);
  if (!mainFunc)
    return false;

  // (1) Find candidate parallel functions and mark them as Cilk functions
  //
  for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI)
    if (! FI->isExternal())
      {
        Function* F = FI;
        DSGraph& tdg = getAnalysis<TDDataStructures>().getDSGraph(*F);

        // All the hard analysis work gets done here!
        //
        FindParallelCalls finder(*F,
                                getAnalysis<PgmDependenceGraph>().getGraph(*F));
                      /* getAnalysis<MemoryDepAnalysis>().getGraph(*F)); */

        // Now we know which call instructions are useful to parallelize.
        // Remember those callee functions.
        //
        for (std::vector<CallInst*>::iterator
               CII = finder.parallelCalls.begin(),
               CIE = finder.parallelCalls.end(); CII != CIE; ++CII)
          {
            // Check if this is a direct call...
            if ((*CII)->getCalledFunction() != NULL)
              { // direct call: if this is to a non-external function,
                // mark it as a parallelizable function
                if (! (*CII)->getCalledFunction()->isExternal())
                  parallelFunctions.insert((*CII)->getCalledFunction());
              }
            else
              { // Indirect call: mark all potential callees as bad
                std::vector<GlobalValue*> callees =
                  tdg.getNodeForValue((*CII)->getCalledValue())
                     .getNode()->getGlobals();
                indirectlyCalled.insert(callees.begin(), callees.end());
              }
          }
      }

  // Remove all indirectly called functions from the list of Cilk functions.
  //
  for (hash_set<Function*>::iterator PFI = parallelFunctions.begin(),
         PFE = parallelFunctions.end(); PFI != PFE; ++PFI)
    if (indirectlyCalled.count(*PFI) == 0)
      safeParallelFunctions.insert(*PFI);
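
  // An illustrative note (rationale inferred from the "mark all potential
  // callees as bad" comment above): a Cilk procedure must be invoked with
  // the spawn keyword, but an indirect call site cannot be turned into a
  // spawn because its callee is unknown there.  So if parallelFunctions =
  // {f, g} (hypothetical names) and g may be reached from some indirect
  // call per the TD DS graph, only f survives into safeParallelFunctions.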

#undef CAN_USE_BIND1ST_ON_REFERENCE_TYPE_ARGS
#ifdef CAN_USE_BIND1ST_ON_REFERENCE_TYPE_ARGS
  // Use this undecipherable STLese because erase invalidates iterators.
  // Otherwise we have to copy sets as above.
  hash_set<Function*>::iterator extrasBegin =
    std::remove_if(parallelFunctions.begin(), parallelFunctions.end(),
                   compose1(std::bind2nd(std::greater<int>(), 0),
                            bind_obj(&indirectlyCalled,
                                     &hash_set<const GlobalValue*>::count)));
  parallelFunctions.erase(extrasBegin, parallelFunctions.end());
#endif

  // If there are no parallel functions, we can just give up.
  if (safeParallelFunctions.empty())
    return false;

  // Add main as a parallel function since Cilk requires this.
  safeParallelFunctions.insert(mainFunc);

  // (2,3) Transform each Cilk function and all its calls simply by
  //       adding a unique suffix to the function name.
  //       This should identify both functions and calls to such functions
  //       to the code generator.
  // (4)   Also, insert calls to sync at appropriate points.
  //
  Cilkifier cilkifier(M);
  for (hash_set<Function*>::iterator CFI = safeParallelFunctions.begin(),
         CFE = safeParallelFunctions.end(); CFI != CFE; ++CFI)
    {
      cilkifier.TransformFunc(*CFI, safeParallelFunctions,
                              getAnalysis<PgmDependenceGraph>().getGraph(**CFI));
                     /* getAnalysis<MemoryDepAnalysis>().getGraph(**CFI)); */
    }

  return true;
}
548}