//===-- AMDGPUAnnotateUniformValues.cpp - ---------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass adds amdgpu.uniform metadata to IR values so this information
/// can be used during instruction selection.
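///
/// For example (illustrative IR, not taken from a specific test), a uniform
/// pointer feeding a global load may end up annotated as:
///
///   %gep = getelementptr i32, i32 addrspace(1)* %in, i64 0, !amdgpu.uniform !0
///   %val = load i32, i32 addrspace(1)* %gep
///   ...
///   !0 = !{}
///
/// In kernels, amdgpu.noclobber is additionally attached when the pass can
/// show that the pointed-to memory is not written within the function before
/// the load.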
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-annotate-uniform"

using namespace llvm;

namespace {

class AMDGPUAnnotateUniformValues : public FunctionPass,
                       public InstVisitor<AMDGPUAnnotateUniformValues> {
  DivergenceAnalysis *DA;
  MemoryDependenceResults *MDR;
  LoopInfo *LI;
  DenseMap<Value*, GetElementPtrInst*> noClobberClones;
  bool isKernelFunc;

public:
  static char ID;
  AMDGPUAnnotateUniformValues() :
    FunctionPass(ID) { }
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  StringRef getPassName() const override {
    return "AMDGPU Annotate Uniform Values";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DivergenceAnalysis>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesAll();
  }

  void visitBranchInst(BranchInst &I);
  void visitLoadInst(LoadInst &I);
  bool isClobberedInFunction(LoadInst * Load);
};

} // End anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                      "Add AMDGPU uniform metadata", false, false)
INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                    "Add AMDGPU uniform metadata", false, false)

char AMDGPUAnnotateUniformValues::ID = 0;

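// Helpers to attach the empty named-metadata nodes that are queried later
// during instruction selection (see the \file comment above).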
static void setUniformMetadata(Instruction *I) {
  I->setMetadata("amdgpu.uniform", MDNode::get(I->getContext(), {}));
}
static void setNoClobberMetadata(Instruction *I) {
  I->setMetadata("amdgpu.noclobber", MDNode::get(I->getContext(), {}));
}

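// Walk the predecessor graph depth-first from Root and record every block
// reached in Set.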
static void DFS(BasicBlock *Root, SetVector<BasicBlock*> & Set) {
  for (auto I : predecessors(Root))
    if (Set.insert(I))
      DFS(I, Set);
}

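// Conservatively determine whether Load's pointer may be clobbered within this
// function: query MemoryDependenceAnalysis in the load's block (up to the load
// itself), in every transitive predecessor, and, if the load sits in a loop,
// in every block of the outermost containing loop.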
bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst * Load) {
  // 1. Get the Loop containing Load->getParent().
  // 2. If it exists, collect all the basic blocks of the outermost loop and
  //    check them for writes; otherwise start the DFS over all predecessors.
  // 3. Start the DFS over all predecessors from the outermost loop header.
  SetVector<BasicBlock *> Checklist;
  BasicBlock *Start = Load->getParent();
  Checklist.insert(Start);
  const Value *Ptr = Load->getPointerOperand();
  const Loop *L = LI->getLoopFor(Start);
  if (L) {
    const Loop *P = L;
    do {
      L = P;
      P = P->getParentLoop();
    } while (P);
    Checklist.insert(L->block_begin(), L->block_end());
    Start = L->getHeader();
  }

  DFS(Start, Checklist);
  for (auto &BB : Checklist) {
    BasicBlock::iterator StartIt = (BB == Load->getParent()) ?
      BasicBlock::iterator(Load) : BB->end();
    if (MDR->getPointerDependencyFrom(MemoryLocation(Ptr),
        true, StartIt, BB, Load).isClobber())
      return true;
  }
  return false;
}

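// Conditional branches with a uniform condition are tagged so the information
// is available to instruction selection; unconditional branches are skipped.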
void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
  if (I.isUnconditional())
    return;

  Value *Cond = I.getCondition();
  if (!DA->isUniform(Cond))
    return;

  setUniformMetadata(I.getParent()->getTerminator());
}

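// Annotate loads from uniform pointers. If the pointer is not itself an
// instruction (e.g. a kernel argument or a global), a zero-index GEP clone is
// created in the entry block purely as a carrier for the metadata.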
void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
  Value *Ptr = I.getPointerOperand();
  if (!DA->isUniform(Ptr))
    return;
  auto isGlobalLoad = [](LoadInst &Load)->bool {
    return Load.getPointerAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
  };
  // We only track memory up to the function boundary; a FunctionPass cannot
  // look beyond it. We can therefore only prove that memory is not clobbered
  // for operations that live entirely inside a kernel.
  bool NotClobbered = isKernelFunc && !isClobberedInFunction(&I);
  Instruction *PtrI = dyn_cast<Instruction>(Ptr);
  if (!PtrI && NotClobbered && isGlobalLoad(I)) {
    if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
      // Look up an existing GEP clone for this pointer.
      if (noClobberClones.count(Ptr)) {
        PtrI = noClobberClones[Ptr];
      } else {
        // Create a zero-index GEP of the value.
        Function *F = I.getParent()->getParent();
        Value *Idx = Constant::getIntegerValue(
          Type::getInt32Ty(Ptr->getContext()), APInt(64, 0));
        // Insert the GEP at the entry block so it dominates all uses.
        PtrI = GetElementPtrInst::Create(
          Ptr->getType()->getPointerElementType(), Ptr,
          ArrayRef<Value*>(Idx), Twine(""), F->getEntryBlock().getFirstNonPHI());
      }
      I.replaceUsesOfWith(Ptr, PtrI);
    }
  }

  if (PtrI) {
    setUniformMetadata(PtrI);
    if (NotClobbered)
      setNoClobberMetadata(PtrI);
  }
}

bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
  return false;
}

bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DA  = &getAnalysis<DivergenceAnalysis>();
  MDR = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  LI  = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  isKernelFunc = F.getCallingConv() == CallingConv::AMDGPU_KERNEL;

  visit(F);
  noClobberClones.clear();
  return true;
}

FunctionPass *
llvm::createAMDGPUAnnotateUniformValues() {
  return new AMDGPUAnnotateUniformValues();
}