//===- AMDGPUAnnotateKernelFeaturesPass.cpp -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass adds target attributes to functions that use intrinsics
/// affecting calling convention lowering.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-annotate-kernel-features"

using namespace llvm;

namespace {

class AMDGPUAnnotateKernelFeatures : public CallGraphSCCPass {
private:
  const TargetMachine *TM = nullptr;
  SmallVector<CallGraphNode*, 8> NodeList;

  bool addFeatureAttributes(Function &F);
  bool processUniformWorkGroupAttribute();
  bool propagateUniformWorkGroupAttribute(Function &Caller, Function &Callee);

public:
  static char ID;

  AMDGPUAnnotateKernelFeatures() : CallGraphSCCPass(ID) {}

  bool doInitialization(CallGraph &CG) override;
  bool runOnSCC(CallGraphSCC &SCC) override;

  StringRef getPassName() const override {
    return "AMDGPU Annotate Kernel Features";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  static bool visitConstantExpr(const ConstantExpr *CE);
  static bool visitConstantExprsRecursively(
    const Constant *EntryC,
    SmallPtrSet<const Constant *, 8> &ConstantExprVisited);
};

} // end anonymous namespace

char AMDGPUAnnotateKernelFeatures::ID = 0;

char &llvm::AMDGPUAnnotateKernelFeaturesID = AMDGPUAnnotateKernelFeatures::ID;

INITIALIZE_PASS(AMDGPUAnnotateKernelFeatures, DEBUG_TYPE,
                "Add AMDGPU function attributes", false, false)

// The queue ptr is only needed when casting to flat, not from it.
static bool castRequiresQueuePtr(unsigned SrcAS) {
  return SrcAS == AMDGPUAS::LOCAL_ADDRESS || SrcAS == AMDGPUAS::PRIVATE_ADDRESS;
}

static bool castRequiresQueuePtr(const AddrSpaceCastInst *ASC) {
  return castRequiresQueuePtr(ASC->getSrcAddressSpace());
}

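// Returns true if this constant expression is an addrspacecast that requires
// the queue pointer, i.e. a cast out of the local or private address space.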
bool AMDGPUAnnotateKernelFeatures::visitConstantExpr(const ConstantExpr *CE) {
  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    unsigned SrcAS = CE->getOperand(0)->getType()->getPointerAddressSpace();
    return castRequiresQueuePtr(SrcAS);
  }

  return false;
}

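// Walk EntryC and its sub-expressions with an explicit worklist, returning
// true as soon as any visited constant expression requires the queue pointer.
// ConstantExprVisited is shared by the caller so each constant is only
// scanned once per function.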
bool AMDGPUAnnotateKernelFeatures::visitConstantExprsRecursively(
  const Constant *EntryC,
  SmallPtrSet<const Constant *, 8> &ConstantExprVisited) {

  if (!ConstantExprVisited.insert(EntryC).second)
    return false;

  SmallVector<const Constant *, 16> Stack;
  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (visitConstantExpr(CE))
        return true;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;

      if (!ConstantExprVisited.insert(OpC).second)
        continue;

      Stack.push_back(OpC);
    }
  }

  return false;
}

// We do not need to note the x workitem or workgroup id because they are
// always initialized.
//
// TODO: We should not add the attributes if the known compile time workgroup
// size is 1 for y/z.
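//
// Sets NonKernelOnly for attributes that are implicit in kernels (the x ids),
// and IsQueuePtr when the intrinsic implies the queue pointer is needed.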
static StringRef intrinsicToAttrName(Intrinsic::ID ID,
                                     bool &NonKernelOnly,
                                     bool &IsQueuePtr) {
  switch (ID) {
  case Intrinsic::amdgcn_workitem_id_x:
    NonKernelOnly = true;
    return "amdgpu-work-item-id-x";
  case Intrinsic::amdgcn_workgroup_id_x:
    NonKernelOnly = true;
    return "amdgpu-work-group-id-x";
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::r600_read_tidig_y:
    return "amdgpu-work-item-id-y";
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::r600_read_tidig_z:
    return "amdgpu-work-item-id-z";
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::r600_read_tgid_y:
    return "amdgpu-work-group-id-y";
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::r600_read_tgid_z:
    return "amdgpu-work-group-id-z";
  case Intrinsic::amdgcn_dispatch_ptr:
    return "amdgpu-dispatch-ptr";
  case Intrinsic::amdgcn_dispatch_id:
    return "amdgpu-dispatch-id";
  case Intrinsic::amdgcn_kernarg_segment_ptr:
    return "amdgpu-kernarg-segment-ptr";
  case Intrinsic::amdgcn_implicitarg_ptr:
    return "amdgpu-implicitarg-ptr";
  case Intrinsic::amdgcn_queue_ptr:
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private:
    // TODO: Does not require queue ptr on gfx9+
  case Intrinsic::trap:
  case Intrinsic::debugtrap:
    IsQueuePtr = true;
    return "amdgpu-queue-ptr";
  default:
    return "";
  }
}

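// Propagate the attribute Name from Callee to Parent if the callee has it.
// Returns true if the attribute was present.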
static bool handleAttr(Function &Parent, const Function &Callee,
                       StringRef Name) {
  if (Callee.hasFnAttribute(Name)) {
    Parent.addFnAttr(Name);
    return true;
  }
  return false;
}

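// Copy each implicit-argument attribute the callee carries into the caller,
// recording whether the queue pointer becomes required.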
static void copyFeaturesToFunction(Function &Parent, const Function &Callee,
                                   bool &NeedQueuePtr) {
  // The x ids are unnecessarily propagated to kernels.
  static const StringRef AttrNames[] = {
    { "amdgpu-work-item-id-x" },
    { "amdgpu-work-item-id-y" },
    { "amdgpu-work-item-id-z" },
    { "amdgpu-work-group-id-x" },
    { "amdgpu-work-group-id-y" },
    { "amdgpu-work-group-id-z" },
    { "amdgpu-dispatch-ptr" },
    { "amdgpu-dispatch-id" },
    { "amdgpu-kernarg-segment-ptr" },
    { "amdgpu-implicitarg-ptr" }
  };

  if (handleAttr(Parent, Callee, "amdgpu-queue-ptr"))
    NeedQueuePtr = true;

  for (StringRef AttrName : AttrNames)
    handleAttr(Parent, Callee, AttrName);
}

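// Walk the accumulated NodeList in reverse, so callers are seen before their
// callees, and propagate uniform-work-group-size along each call edge.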
bool AMDGPUAnnotateKernelFeatures::processUniformWorkGroupAttribute() {
  bool Changed = false;

  for (auto *Node : reverse(NodeList)) {
    Function *Caller = Node->getFunction();

    for (auto I : *Node) {
      Function *Callee = std::get<1>(I)->getFunction();
      if (Callee)
        Changed |= propagateUniformWorkGroupAttribute(*Caller, *Callee);
    }
  }

  return Changed;
}

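// Propagate uniform-work-group-size from Caller to Callee. Externally defined
// callees are conservatively marked "false". Returns true if either
// function's attributes were changed.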
bool AMDGPUAnnotateKernelFeatures::propagateUniformWorkGroupAttribute(
    Function &Caller, Function &Callee) {

  // Check for an externally defined function.
  if (!Callee.hasExactDefinition()) {
    Callee.addFnAttr("uniform-work-group-size", "false");
    if (!Caller.hasFnAttribute("uniform-work-group-size"))
      Caller.addFnAttr("uniform-work-group-size", "false");

    return true;
  }
  // Check if the caller has the attribute.
  if (Caller.hasFnAttribute("uniform-work-group-size")) {
    // Check if the value of the attribute is true.
    if (Caller.getFnAttribute("uniform-work-group-size")
        .getValueAsString().equals("true")) {
      // Propagate the attribute to the callee if it does not have it yet.
      if (!Callee.hasFnAttribute("uniform-work-group-size")) {
        Callee.addFnAttr("uniform-work-group-size", "true");
        return true;
      }
    } else {
      Callee.addFnAttr("uniform-work-group-size", "false");
      return true;
    }
  } else {
    // If the attribute is absent, set it to false on both functions.
    Caller.addFnAttr("uniform-work-group-size", "false");
    Callee.addFnAttr("uniform-work-group-size", "false");
    return true;
  }
  return false;
}

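// Scan the body of F for intrinsic uses, ordinary calls, and addrspacecasts,
// adding the corresponding feature attributes. Returns true if any attribute
// was added.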
bool AMDGPUAnnotateKernelFeatures::addFeatureAttributes(Function &F) {
  const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(F);
  bool HasFlat = ST.hasFlatAddressSpace();
  bool HasApertureRegs = ST.hasApertureRegs();
  SmallPtrSet<const Constant *, 8> ConstantExprVisited;

  bool Changed = false;
  bool NeedQueuePtr = false;
  bool HaveCall = false;
  bool IsFunc = !AMDGPU::isEntryFunctionCC(F.getCallingConv());

  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      CallSite CS(&I);
      if (CS) {
        Function *Callee = CS.getCalledFunction();

        // TODO: Do something with indirect calls.
        if (!Callee) {
          if (!CS.isInlineAsm())
            HaveCall = true;
          continue;
        }

        Intrinsic::ID IID = Callee->getIntrinsicID();
        if (IID == Intrinsic::not_intrinsic) {
          HaveCall = true;
          copyFeaturesToFunction(F, *Callee, NeedQueuePtr);
          Changed = true;
        } else {
          bool NonKernelOnly = false;
          StringRef AttrName = intrinsicToAttrName(IID,
                                                   NonKernelOnly, NeedQueuePtr);
          if (!AttrName.empty() && (IsFunc || !NonKernelOnly)) {
            F.addFnAttr(AttrName);
            Changed = true;
          }
        }
      }

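      // Once the queue pointer is known to be required, or aperture registers
      // make it unnecessary, there is no need to inspect addrspacecasts.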
      if (NeedQueuePtr || HasApertureRegs)
        continue;

      if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
        if (castRequiresQueuePtr(ASC)) {
          NeedQueuePtr = true;
          continue;
        }
      }

      for (const Use &U : I.operands()) {
        const auto *OpC = dyn_cast<Constant>(U);
        if (!OpC)
          continue;

        if (visitConstantExprsRecursively(OpC, ConstantExprVisited)) {
          NeedQueuePtr = true;
          break;
        }
      }
    }
  }

  if (NeedQueuePtr) {
    F.addFnAttr("amdgpu-queue-ptr");
    Changed = true;
  }

  // TODO: We could refine this to captured pointers that could possibly be
  // accessed by flat instructions. For now this is mostly a poor way of
  // estimating whether there are calls before argument lowering.
  if (HasFlat && !IsFunc && HaveCall) {
    F.addFnAttr("amdgpu-flat-scratch");
    Changed = true;
  }

  return Changed;
}

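// Queue each node that is still referenced; once an unreferenced (root) node
// is reached, process the queued list for uniform-work-group-size propagation.
// Feature attributes are added to every defined function as it is visited.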
bool AMDGPUAnnotateKernelFeatures::runOnSCC(CallGraphSCC &SCC) {
  bool Changed = false;

  for (CallGraphNode *I : SCC) {
    // Build a list of CallGraphNodes, ordered from most uses to least.
    if (I->getNumReferences())
      NodeList.push_back(I);
    else {
      processUniformWorkGroupAttribute();
      NodeList.clear();
    }

    Function *F = I->getFunction();
    // Add feature attributes.
    if (!F || F->isDeclaration())
      continue;
    Changed |= addFeatureAttributes(*F);
  }

  return Changed;
}

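// The TargetMachine is needed to query per-function subtarget features; fetch
// it from TargetPassConfig, which is only present in a codegen pipeline.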
bool AMDGPUAnnotateKernelFeatures::doInitialization(CallGraph &CG) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    report_fatal_error("TargetMachine is required");

  TM = &TPC->getTM<TargetMachine>();
  return false;
}

Pass *llvm::createAMDGPUAnnotateKernelFeaturesPass() {
  return new AMDGPUAnnotateKernelFeatures();
}