//===-- AMDGPUAnnotateKernelFeaturesPass.cpp ------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This pass adds target attributes to functions that use intrinsics
/// which will impact calling convention lowering.
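///
/// For example, a function that calls llvm.amdgcn.workitem.id.y is annotated
/// with "amdgpu-work-item-id-y", and one that casts a private or local pointer
/// to the flat address space is annotated with "amdgpu-queue-ptr".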
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "amdgpu-annotate-kernel-features"

using namespace llvm;

namespace {

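/// Call-graph pass that infers which ABI inputs (workitem/workgroup IDs,
/// dispatch/queue/kernarg pointers) each function requires, based on the
/// intrinsics it calls, the address-space casts it performs, and the
/// attributes of its callees.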
class AMDGPUAnnotateKernelFeatures : public CallGraphSCCPass {
private:
  const TargetMachine *TM = nullptr;
  AMDGPUAS AS;

  bool addFeatureAttributes(Function &F);

public:
  static char ID;

  AMDGPUAnnotateKernelFeatures() : CallGraphSCCPass(ID) {}

  bool doInitialization(CallGraph &CG) override;
  bool runOnSCC(CallGraphSCC &SCC) override;
  StringRef getPassName() const override {
    return "AMDGPU Annotate Kernel Features";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  static bool visitConstantExpr(const ConstantExpr *CE, AMDGPUAS AS);
  static bool visitConstantExprsRecursively(
    const Constant *EntryC,
    SmallPtrSet<const Constant *, 8> &ConstantExprVisited,
    AMDGPUAS AS);
};

} // end anonymous namespace

char AMDGPUAnnotateKernelFeatures::ID = 0;

char &llvm::AMDGPUAnnotateKernelFeaturesID = AMDGPUAnnotateKernelFeatures::ID;

INITIALIZE_PASS(AMDGPUAnnotateKernelFeatures, DEBUG_TYPE,
                "Add AMDGPU function attributes", false, false)

// The queue ptr is only needed when casting to flat, not from it.
static bool castRequiresQueuePtr(unsigned SrcAS, const AMDGPUAS &AS) {
  return SrcAS == AS.LOCAL_ADDRESS || SrcAS == AS.PRIVATE_ADDRESS;
}

static bool castRequiresQueuePtr(const AddrSpaceCastInst *ASC,
                                 const AMDGPUAS &AS) {
  return castRequiresQueuePtr(ASC->getSrcAddressSpace(), AS);
}

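// Check a single constant expression for an addrspacecast that would require
// the queue ptr.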
bool AMDGPUAnnotateKernelFeatures::visitConstantExpr(const ConstantExpr *CE,
                                                     AMDGPUAS AS) {
  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    unsigned SrcAS = CE->getOperand(0)->getType()->getPointerAddressSpace();
    return castRequiresQueuePtr(SrcAS, AS);
  }

  return false;
}

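// Iteratively walk the operand tree of a constant, using a visited set to
// avoid re-checking shared subexpressions, and report whether any nested
// constant expression requires the queue ptr.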
bool AMDGPUAnnotateKernelFeatures::visitConstantExprsRecursively(
  const Constant *EntryC,
  SmallPtrSet<const Constant *, 8> &ConstantExprVisited,
  AMDGPUAS AS) {

  if (!ConstantExprVisited.insert(EntryC).second)
    return false;

  SmallVector<const Constant *, 16> Stack;
  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (visitConstantExpr(CE, AS))
        return true;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;

      if (!ConstantExprVisited.insert(OpC).second)
        continue;

      Stack.push_back(OpC);
    }
  }

  return false;
}

// The x workitem and workgroup ids do not need to be noted on kernels because
// they are always initialized there; for other functions they are flagged via
// NonKernelOnly.
//
// TODO: We should not add the attributes if the known compile time workgroup
// size is 1 for y/z.
static StringRef intrinsicToAttrName(Intrinsic::ID ID,
                                     bool &NonKernelOnly,
                                     bool &IsQueuePtr) {
  switch (ID) {
  case Intrinsic::amdgcn_workitem_id_x:
    NonKernelOnly = true;
    return "amdgpu-work-item-id-x";
  case Intrinsic::amdgcn_workgroup_id_x:
    NonKernelOnly = true;
    return "amdgpu-work-group-id-x";
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::r600_read_tidig_y:
    return "amdgpu-work-item-id-y";
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::r600_read_tidig_z:
    return "amdgpu-work-item-id-z";
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::r600_read_tgid_y:
    return "amdgpu-work-group-id-y";
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::r600_read_tgid_z:
    return "amdgpu-work-group-id-z";
  case Intrinsic::amdgcn_dispatch_ptr:
    return "amdgpu-dispatch-ptr";
  case Intrinsic::amdgcn_dispatch_id:
    return "amdgpu-dispatch-id";
  case Intrinsic::amdgcn_kernarg_segment_ptr:
    return "amdgpu-kernarg-segment-ptr";
  case Intrinsic::amdgcn_implicitarg_ptr:
    return "amdgpu-implicitarg-ptr";
  case Intrinsic::amdgcn_queue_ptr:
  case Intrinsic::trap:
  case Intrinsic::debugtrap:
    IsQueuePtr = true;
    return "amdgpu-queue-ptr";
  default:
    return "";
  }
}

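// Copy the attribute Name from Callee to Parent if the callee has it; returns
// true if it was copied.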
static bool handleAttr(Function &Parent, const Function &Callee,
                       StringRef Name) {
  if (Callee.hasFnAttribute(Name)) {
    Parent.addFnAttr(Name);
    return true;
  }

  return false;
}

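// Propagate the ABI input attributes required by a callee up to its caller,
// tracking the queue ptr separately through NeedQueuePtr.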
static void copyFeaturesToFunction(Function &Parent, const Function &Callee,
                                   bool &NeedQueuePtr) {
  // Note: the x ids are also propagated to kernels here, where they are
  // unnecessary.
  static const StringRef AttrNames[] = {
    { "amdgpu-work-item-id-x" },
    { "amdgpu-work-item-id-y" },
    { "amdgpu-work-item-id-z" },
    { "amdgpu-work-group-id-x" },
    { "amdgpu-work-group-id-y" },
    { "amdgpu-work-group-id-z" },
    { "amdgpu-dispatch-ptr" },
    { "amdgpu-dispatch-id" },
    { "amdgpu-kernarg-segment-ptr" },
    { "amdgpu-implicitarg-ptr" }
  };

  if (handleAttr(Parent, Callee, "amdgpu-queue-ptr"))
    NeedQueuePtr = true;

  for (StringRef AttrName : AttrNames)
    handleAttr(Parent, Callee, AttrName);
}

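// Infer feature attributes for F from its body: intrinsic calls map directly
// to attributes, ordinary calls pull in the callee's attributes, and
// addrspacecasts (including ones folded into constant expressions) may
// require the queue ptr.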
bool AMDGPUAnnotateKernelFeatures::addFeatureAttributes(Function &F) {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  bool HasFlat = ST.hasFlatAddressSpace();
  bool HasApertureRegs = ST.hasApertureRegs();
  SmallPtrSet<const Constant *, 8> ConstantExprVisited;

  bool Changed = false;
  bool NeedQueuePtr = false;
  bool HaveCall = false;
  bool IsFunc = !AMDGPU::isEntryFunctionCC(F.getCallingConv());

  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      CallSite CS(&I);
      if (CS) {
        Function *Callee = CS.getCalledFunction();

        // TODO: Do something with indirect calls.
        if (!Callee) {
          if (!CS.isInlineAsm())
            HaveCall = true;
          continue;
        }

        Intrinsic::ID IID = Callee->getIntrinsicID();
        if (IID == Intrinsic::not_intrinsic) {
          HaveCall = true;
          copyFeaturesToFunction(F, *Callee, NeedQueuePtr);
          Changed = true;
        } else {
          bool NonKernelOnly = false;
          StringRef AttrName = intrinsicToAttrName(IID,
                                                   NonKernelOnly, NeedQueuePtr);
          if (!AttrName.empty() && (IsFunc || !NonKernelOnly)) {
            F.addFnAttr(AttrName);
            Changed = true;
          }
        }
      }

      if (NeedQueuePtr || HasApertureRegs)
        continue;

      if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
        if (castRequiresQueuePtr(ASC, AS)) {
          NeedQueuePtr = true;
          continue;
        }
      }

      for (const Use &U : I.operands()) {
        const auto *OpC = dyn_cast<Constant>(U);
        if (!OpC)
          continue;

        if (visitConstantExprsRecursively(OpC, ConstantExprVisited, AS)) {
          NeedQueuePtr = true;
          break;
        }
      }
    }
  }

  if (NeedQueuePtr) {
    F.addFnAttr("amdgpu-queue-ptr");
    Changed = true;
  }

  // TODO: We could refine this to captured pointers that could possibly be
  // accessed by flat instructions. For now this is mostly a poor way of
  // estimating whether there are calls before argument lowering.
  if (HasFlat && !IsFunc && HaveCall) {
    F.addFnAttr("amdgpu-flat-scratch");
    Changed = true;
  }

  return Changed;
}

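// SCCs are visited in bottom-up order, so a callee's attributes are already
// in place when its callers are processed.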
bool AMDGPUAnnotateKernelFeatures::runOnSCC(CallGraphSCC &SCC) {
  Module &M = SCC.getCallGraph().getModule();
  Triple TT(M.getTargetTriple());

  bool Changed = false;
  for (CallGraphNode *I : SCC) {
    Function *F = I->getFunction();
    if (!F || F->isDeclaration())
      continue;

    Changed |= addFeatureAttributes(*F);
  }

  return Changed;
}

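// Cache the TargetMachine (required, via TargetPassConfig) and the target's
// address-space mapping before any SCC is processed.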
bool AMDGPUAnnotateKernelFeatures::doInitialization(CallGraph &CG) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    report_fatal_error("TargetMachine is required");

  AS = AMDGPU::getAMDGPUAS(CG.getModule());
  TM = &TPC->getTM<TargetMachine>();
  return false;
}

Pass *llvm::createAMDGPUAnnotateKernelFeaturesPass() {
  return new AMDGPUAnnotateKernelFeatures();
}