//===- AMDGPUUnifyDivergentExitNodes.cpp ----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is a variant of the UnifyFunctionExitNodes pass. Rather than ensuring
// there is at most one ret and one unreachable instruction, it ensures there
// is at most one divergent exiting block.
//
// StructurizeCFG can't deal with multi-exit regions formed by branches to
// multiple return nodes. It is not desirable to structurize regions with
// uniform branches, because unifying them into the same return block as the
// divergent exits would inhibit the use of scalar branching. StructurizeCFG
// also can't deal with the case where one branch goes to a return and another
// to an unreachable; this pass replaces the unreachable with a return in that
// case.
//
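// For illustration, a hand-written sketch (hypothetical IR, not taken from a
// test case): given return blocks reached by a divergent branch,
//
//   entry:
//     br i1 %divergent.cond, label %a, label %b
//   a:
//     ret i32 0
//   b:
//     ret i32 1
//
// the pass redirects both exits to a single new return block:
//
//   a:
//     br label %UnifiedReturnBlock
//   b:
//     br label %UnifiedReturnBlock
//   UnifiedReturnBlock:
//     %UnifiedRetVal = phi i32 [ 0, %a ], [ 1, %b ]
//     ret i32 %UnifiedRetVal
//
// Return blocks reached only through uniform branches are left in place.
//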
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-unify-divergent-exit-nodes"

namespace {

class AMDGPUUnifyDivergentExitNodes : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid.

  AMDGPUUnifyDivergentExitNodes() : FunctionPass(ID) {
    initializeAMDGPUUnifyDivergentExitNodesPass(*PassRegistry::getPassRegistry());
  }

  // We can preserve non-critical-edgeness when we unify function exit nodes.
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
};

} // end anonymous namespace

char AMDGPUUnifyDivergentExitNodes::ID = 0;

char &llvm::AMDGPUUnifyDivergentExitNodesID = AMDGPUUnifyDivergentExitNodes::ID;

INITIALIZE_PASS_BEGIN(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                      "Unify divergent function exit nodes", false, false)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                    "Unify divergent function exit nodes", false, false)

void AMDGPUUnifyDivergentExitNodes::getAnalysisUsage(AnalysisUsage &AU) const {
  // TODO: Preserve dominator tree.
  AU.addRequired<PostDominatorTreeWrapperPass>();

  AU.addRequired<DivergenceAnalysis>();

  // No divergent values are changed, only blocks and branch edges.
  AU.addPreserved<DivergenceAnalysis>();

  // We preserve the non-critical-edgeness property.
  AU.addPreservedID(BreakCriticalEdgesID);

  // This is a cluster of orthogonal transforms.
  AU.addPreservedID(LowerSwitchID);
  FunctionPass::getAnalysisUsage(AU);

  AU.addRequired<TargetTransformInfoWrapperPass>();
}

/// \returns true if \p BB is reachable through only uniform branches.
/// XXX - Is there a more efficient way to find this?
static bool isUniformlyReached(const DivergenceAnalysis &DA,
                               BasicBlock &BB) {
  SmallVector<BasicBlock *, 8> Stack;
  SmallPtrSet<BasicBlock *, 8> Visited;

  for (BasicBlock *Pred : predecessors(&BB))
    Stack.push_back(Pred);

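  // Walk backward from BB through its transitive predecessors; if any
  // terminator along the way is divergent, BB may be reached divergently.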
  while (!Stack.empty()) {
    BasicBlock *Top = Stack.pop_back_val();
    if (!DA.isUniform(Top->getTerminator()))
      return false;

    for (BasicBlock *Pred : predecessors(Top)) {
      if (Visited.insert(Pred).second)
        Stack.push_back(Pred);
    }
  }

  return true;
}

static BasicBlock *unifyReturnBlockSet(Function &F,
                                       ArrayRef<BasicBlock *> ReturningBlocks,
                                       const TargetTransformInfo &TTI,
                                       StringRef Name) {
  // Insert a new basic block into the function, add a PHI node (if the
  // function returns a value), and convert all of the return instructions
  // into unconditional branches.
  BasicBlock *NewRetBlock = BasicBlock::Create(F.getContext(), Name, &F);

  PHINode *PN = nullptr;
  if (F.getReturnType()->isVoidTy()) {
    ReturnInst::Create(F.getContext(), nullptr, NewRetBlock);
  } else {
    // If the function doesn't return void, add a PHI node to the block.
    PN = PHINode::Create(F.getReturnType(), ReturningBlocks.size(),
                         "UnifiedRetVal");
    NewRetBlock->getInstList().push_back(PN);
    ReturnInst::Create(F.getContext(), PN, NewRetBlock);
  }

  // Loop over all of the blocks, replacing the return instruction with an
  // unconditional branch.
  for (BasicBlock *BB : ReturningBlocks) {
    // Add an incoming element to the PHI node for every return instruction
    // that is merging into this new block.
    if (PN)
      PN->addIncoming(BB->getTerminator()->getOperand(0), BB);

    BB->getInstList().pop_back(); // Remove the return instruction.
    BranchInst::Create(NewRetBlock, BB);
  }

  for (BasicBlock *BB : ReturningBlocks) {
    // Clean up a possible branch to an unconditional branch to the return.
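    // The {2} argument brace-initializes a SimplifyCFGOptions; assuming the
    // field order at this revision, it sets BonusInstThreshold to 2.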
    simplifyCFG(BB, TTI, {2});
  }

  return NewRetBlock;
}

bool AMDGPUUnifyDivergentExitNodes::runOnFunction(Function &F) {
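  // The roots of the post-dominator tree are the function's exiting blocks;
  // if there is at most one, there is nothing to unify.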
  auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
  if (PDT.getRoots().size() <= 1)
    return false;

  DivergenceAnalysis &DA = getAnalysis<DivergenceAnalysis>();

  // Loop over all of the blocks in the function, tracking all of the blocks
  // that return.
  SmallVector<BasicBlock *, 4> ReturningBlocks;
  SmallVector<BasicBlock *, 4> UnreachableBlocks;

  for (BasicBlock *BB : PDT.getRoots()) {
    if (isa<ReturnInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        ReturningBlocks.push_back(BB);
    } else if (isa<UnreachableInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        UnreachableBlocks.push_back(BB);
    }
  }

  if (!UnreachableBlocks.empty()) {
    BasicBlock *UnreachableBlock = nullptr;

    if (UnreachableBlocks.size() == 1) {
      UnreachableBlock = UnreachableBlocks.front();
    } else {
      UnreachableBlock = BasicBlock::Create(F.getContext(),
                                            "UnifiedUnreachableBlock", &F);
      new UnreachableInst(F.getContext(), UnreachableBlock);

      for (BasicBlock *BB : UnreachableBlocks) {
        BB->getInstList().pop_back(); // Remove the unreachable inst.
        BranchInst::Create(UnreachableBlock, BB);
      }
    }

    if (!ReturningBlocks.empty()) {
      // Don't create a new unreachable inst if we have a return. The
      // structurizer/annotator can't handle multiple exits.
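      //
      // Illustrative sketch (hypothetical IR, not taken from a test case):
      // the divergent exit
      //
      //   unreachable
      //
      // is rewritten below into
      //
      //   call void @llvm.amdgcn.unreachable()
      //   ret void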

      Type *RetTy = F.getReturnType();
      Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);
      UnreachableBlock->getInstList().pop_back(); // Remove the unreachable inst.

      Function *UnreachableIntrin =
          Intrinsic::getDeclaration(F.getParent(), Intrinsic::amdgcn_unreachable);

      // Insert a call to an intrinsic tracking that this is an unreachable
      // point, in case we want to kill the active lanes or something later.
      CallInst::Create(UnreachableIntrin, {}, "", UnreachableBlock);

      // Don't create a scalar trap. We would only want to trap if this code
      // was really reached, but a scalar trap would happen even if no lanes
      // actually reached here.
      ReturnInst::Create(F.getContext(), RetVal, UnreachableBlock);
      ReturningBlocks.push_back(UnreachableBlock);
    }
  }

  // Now handle return blocks.
  if (ReturningBlocks.empty())
    return false; // No blocks return.

  if (ReturningBlocks.size() == 1)
    return false; // Already has a single return block.

  const TargetTransformInfo &TTI
      = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  unifyReturnBlockSet(F, ReturningBlocks, TTI, "UnifiedReturnBlock");
  return true;
}