//===-- AMDGPUAnnotateUniformValues.cpp - ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass adds amdgpu.uniform metadata to IR values so this information
/// can be used during instruction selection.
//
//===----------------------------------------------------------------------===//
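//
// Purely as an illustrative sketch (the value names and types below are
// hypothetical, not taken from a real test case), a uniform load from a kernel
// argument may end up annotated like this:
//
//   %ptr = getelementptr i32, i32 addrspace(1)* %arg, i64 0, !amdgpu.uniform !0, !amdgpu.noclobber !0
//   %val = load i32, i32 addrspace(1)* %ptr
//
//   !0 = !{}
//
// The metadata nodes are empty; their presence alone is what later stages
// check for.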

#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-annotate-uniform"

using namespace llvm;

namespace {

class AMDGPUAnnotateUniformValues : public FunctionPass,
                       public InstVisitor<AMDGPUAnnotateUniformValues> {
  DivergenceAnalysis *DA;
  MemoryDependenceResults *MDR;
  LoopInfo *LI;
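  // Zero-index GEP clones created for pointers that are not instructions
  // (kernel arguments, globals), so repeated uniform loads from the same
  // pointer share a single annotated instruction.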
  DenseMap<Value*, GetElementPtrInst*> noClobberClones;
  bool isKernelFunc;

public:
  static char ID;
  AMDGPUAnnotateUniformValues() :
    FunctionPass(ID) { }
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  StringRef getPassName() const override {
    return "AMDGPU Annotate Uniform Values";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DivergenceAnalysis>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesAll();
  }

  void visitBranchInst(BranchInst &I);
  void visitLoadInst(LoadInst &I);
  bool isClobberedInFunction(LoadInst *Load);
};

} // End anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                      "Add AMDGPU uniform metadata", false, false)
INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                    "Add AMDGPU uniform metadata", false, false)

char AMDGPUAnnotateUniformValues::ID = 0;

static void setUniformMetadata(Instruction *I) {
  I->setMetadata("amdgpu.uniform", MDNode::get(I->getContext(), {}));
}

static void setNoClobberMetadata(Instruction *I) {
  I->setMetadata("amdgpu.noclobber", MDNode::get(I->getContext(), {}));
}

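// Recursively collect into Set every basic block that can reach Root, i.e.
// all of Root's transitive predecessors.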
static void DFS(BasicBlock *Root, SetVector<BasicBlock *> &Set) {
  for (auto I : predecessors(Root))
    if (Set.insert(I))
      DFS(I, Set);
}

bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst *Load) {
  // 1. Get the loop containing Load->getParent().
  // 2. If it exists, add every block of the outermost enclosing loop to the
  //    checklist and check them for writes; the predecessor walk then starts
  //    from that loop's header.
  // 3. Otherwise the predecessor walk starts from the load's own block.
  SetVector<BasicBlock *> Checklist;
  BasicBlock *Start = Load->getParent();
  Checklist.insert(Start);
  const Value *Ptr = Load->getPointerOperand();
  const Loop *L = LI->getLoopFor(Start);
  if (L) {
    const Loop *P = L;
    do {
      L = P;
      P = P->getParentLoop();
    } while (P);
    Checklist.insert(L->block_begin(), L->block_end());
    Start = L->getHeader();
  }

  DFS(Start, Checklist);
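  // For each collected block, ask MemoryDependenceAnalysis whether anything
  // clobbers Ptr. In the load's own block only the instructions above the
  // load are scanned; every other block is scanned in full.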
  for (auto &BB : Checklist) {
    BasicBlock::iterator StartIt = (BB == Load->getParent()) ?
      BasicBlock::iterator(Load) : BB->end();
    if (MDR->getPointerDependencyFrom(MemoryLocation(Ptr),
                                      true, StartIt, BB, Load).isClobber())
      return true;
  }
  return false;
}

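// Conditional branches with a uniform condition get amdgpu.uniform attached to
// the block terminator, i.e. to the branch itself.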
void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
  if (I.isUnconditional())
    return;

  Value *Cond = I.getCondition();
  if (!DA->isUniform(Cond))
    return;

  setUniformMetadata(I.getParent()->getTerminator());
}

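// For a load whose pointer operand is uniform, annotate the instruction that
// defines the pointer. If the pointer is an Argument or GlobalValue there is
// no instruction to carry the metadata, so for unclobbered global loads in
// kernels a trivial zero-index GEP is created at the function entry, cached
// in noClobberClones, and the load is rewritten to use it.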
void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
  Value *Ptr = I.getPointerOperand();
  if (!DA->isUniform(Ptr))
    return;
  auto isGlobalLoad = [](LoadInst &Load) -> bool {
    return Load.getPointerAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
  };
  // We only track memory up to the function boundary; a FunctionPass cannot
  // look any further. Loads can therefore be proven unclobbered only for
  // memory operations that live entirely inside a kernel.
  bool NotClobbered = isKernelFunc && !isClobberedInFunction(&I);
  Instruction *PtrI = dyn_cast<Instruction>(Ptr);
  if (!PtrI && NotClobbered && isGlobalLoad(I)) {
    if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
      // Look up an existing GEP clone of this pointer.
      if (noClobberClones.count(Ptr)) {
        PtrI = noClobberClones[Ptr];
      } else {
        // Create a zero-index GEP of the value.
        Function *F = I.getParent()->getParent();
        Value *Idx = Constant::getIntegerValue(
            Type::getInt32Ty(Ptr->getContext()), APInt(64, 0));
        // Insert the GEP at the function entry so that it dominates all uses.
        PtrI = GetElementPtrInst::Create(
            Ptr->getType()->getPointerElementType(), Ptr,
            ArrayRef<Value*>(Idx), Twine(""), F->getEntryBlock().getFirstNonPHI());
        // Cache the clone so later loads from the same pointer reuse it.
        noClobberClones[Ptr] = cast<GetElementPtrInst>(PtrI);
      }
      I.replaceUsesOfWith(Ptr, PtrI);
    }
  }

  if (PtrI) {
    setUniformMetadata(PtrI);
    if (NotClobbered)
      setNoClobberMetadata(PtrI);
  }
}

bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
  return false;
}

bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DA = &getAnalysis<DivergenceAnalysis>();
  MDR = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  isKernelFunc = F.getCallingConv() == CallingConv::AMDGPU_KERNEL;

  visit(F);
  noClobberClones.clear();
  return true;
}

FunctionPass *
llvm::createAMDGPUAnnotateUniformValues() {
  return new AMDGPUAnnotateUniformValues();
}
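
// A minimal usage sketch, illustrative only: it assumes a standalone tool that
// has initialized the AMDGPU target and already holds a Module M; in-tree the
// pass is scheduled by the AMDGPU target's pass configuration instead.
//
//   legacy::PassManager PM;
//   PM.add(createAMDGPUAnnotateUniformValues());
//   PM.run(M);
//
// The legacy pass manager schedules the required analyses (DivergenceAnalysis,
// MemoryDependenceWrapperPass, LoopInfoWrapperPass) automatically, as requested
// in getAnalysisUsage().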