blob: 4f2ed9fe6236dc52e2597d978af042042f415fb7 [file] [log] [blame]
Matt Arsenault39319482015-11-06 18:01:57 +00001//===-- AMDGPUAnnotateKernelFeaturesPass.cpp ------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file This pass adds target attributes to functions which use intrinsics
11/// which will impact calling convention lowering.
12//
13//===----------------------------------------------------------------------===//
14
15#include "AMDGPU.h"
Matt Arsenault2ffe8fd2016-08-11 19:18:50 +000016#include "llvm/ADT/Triple.h"
Matt Arsenault3b2e2a52016-06-06 20:03:31 +000017#include "llvm/IR/Constants.h"
Matt Arsenault39319482015-11-06 18:01:57 +000018#include "llvm/IR/Instructions.h"
19#include "llvm/IR/Module.h"
20
21#define DEBUG_TYPE "amdgpu-annotate-kernel-features"
22
23using namespace llvm;
24
namespace {

/// Module pass that attaches "amdgpu-*" string function attributes to
/// functions whose use of certain intrinsics (or of addrspacecasts from
/// local/private to flat) affects calling convention lowering — e.g. which
/// kernel input registers (queue ptr, workitem/workgroup IDs) must be
/// preloaded.
class AMDGPUAnnotateKernelFeatures : public ModulePass {
private:
  // Returns true if F contains an addrspacecast (as an instruction or inside
  // a constant expression operand) that requires the queue ptr.
  static bool hasAddrSpaceCast(const Function &F);

  // Adds AttrName to every function that calls the intrinsic Intrin.
  void addAttrToCallers(Function *Intrin, StringRef AttrName);
  // For each {intrinsic-name, attribute-name} pair, annotates all callers of
  // that intrinsic. Returns true if any function in M was annotated.
  bool addAttrsForIntrinsics(Module &M, ArrayRef<StringRef[2]>);

public:
  static char ID;

  AMDGPUAnnotateKernelFeatures() : ModulePass(ID) { }
  bool runOnModule(Module &M) override;
  StringRef getPassName() const override {
    return "AMDGPU Annotate Kernel Features";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Only adds attributes; no analyses are invalidated.
    AU.setPreservesAll();
    ModulePass::getAnalysisUsage(AU);
  }

  // Returns true if CE is an addrspacecast that requires the queue ptr.
  static bool visitConstantExpr(const ConstantExpr *CE);
  // Walks EntryC and all constants reachable through its operands, returning
  // true if any requires the queue ptr. ConstantExprVisited de-duplicates
  // work across calls within a function.
  static bool visitConstantExprsRecursively(
    const Constant *EntryC,
    SmallPtrSet<const Constant *, 8> &ConstantExprVisited);
};

}
55
char AMDGPUAnnotateKernelFeatures::ID = 0;

// Expose the pass's identity so the target pipeline can reference it.
char &llvm::AMDGPUAnnotateKernelFeaturesID = AMDGPUAnnotateKernelFeatures::ID;

// Register the pass under the "amdgpu-annotate-kernel-features" name
// (DEBUG_TYPE); not a CFG-only pass, not an analysis.
INITIALIZE_PASS(AMDGPUAnnotateKernelFeatures, DEBUG_TYPE,
                "Add AMDGPU function attributes", false, false)
Matt Arsenault39319482015-11-06 18:01:57 +000062
Matt Arsenault39319482015-11-06 18:01:57 +000063
Matt Arsenault3b2e2a52016-06-06 20:03:31 +000064// The queue ptr is only needed when casting to flat, not from it.
65static bool castRequiresQueuePtr(unsigned SrcAS) {
Matt Arsenault99c14522016-04-25 19:27:24 +000066 return SrcAS == AMDGPUAS::LOCAL_ADDRESS || SrcAS == AMDGPUAS::PRIVATE_ADDRESS;
67}
68
Matt Arsenault3b2e2a52016-06-06 20:03:31 +000069static bool castRequiresQueuePtr(const AddrSpaceCastInst *ASC) {
70 return castRequiresQueuePtr(ASC->getSrcAddressSpace());
71}
72
73bool AMDGPUAnnotateKernelFeatures::visitConstantExpr(const ConstantExpr *CE) {
74 if (CE->getOpcode() == Instruction::AddrSpaceCast) {
75 unsigned SrcAS = CE->getOperand(0)->getType()->getPointerAddressSpace();
76 return castRequiresQueuePtr(SrcAS);
77 }
78
79 return false;
80}
81
82bool AMDGPUAnnotateKernelFeatures::visitConstantExprsRecursively(
83 const Constant *EntryC,
84 SmallPtrSet<const Constant *, 8> &ConstantExprVisited) {
85
86 if (!ConstantExprVisited.insert(EntryC).second)
87 return false;
88
89 SmallVector<const Constant *, 16> Stack;
90 Stack.push_back(EntryC);
91
92 while (!Stack.empty()) {
93 const Constant *C = Stack.pop_back_val();
94
95 // Check this constant expression.
96 if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
97 if (visitConstantExpr(CE))
98 return true;
99 }
100
101 // Visit all sub-expressions.
102 for (const Use &U : C->operands()) {
103 const auto *OpC = dyn_cast<Constant>(U);
104 if (!OpC)
105 continue;
106
107 if (!ConstantExprVisited.insert(OpC).second)
108 continue;
109
110 Stack.push_back(OpC);
111 }
112 }
113
114 return false;
115}
116
Matt Arsenault99c14522016-04-25 19:27:24 +0000117// Return true if an addrspacecast is used that requires the queue ptr.
118bool AMDGPUAnnotateKernelFeatures::hasAddrSpaceCast(const Function &F) {
Matt Arsenault3b2e2a52016-06-06 20:03:31 +0000119 SmallPtrSet<const Constant *, 8> ConstantExprVisited;
120
Matt Arsenault99c14522016-04-25 19:27:24 +0000121 for (const BasicBlock &BB : F) {
122 for (const Instruction &I : BB) {
123 if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
124 if (castRequiresQueuePtr(ASC))
125 return true;
126 }
Matt Arsenault3b2e2a52016-06-06 20:03:31 +0000127
128 for (const Use &U : I.operands()) {
129 const auto *OpC = dyn_cast<Constant>(U);
130 if (!OpC)
131 continue;
132
133 if (visitConstantExprsRecursively(OpC, ConstantExprVisited))
134 return true;
135 }
Matt Arsenault99c14522016-04-25 19:27:24 +0000136 }
137 }
138
139 return false;
140}
Matt Arsenault39319482015-11-06 18:01:57 +0000141
142void AMDGPUAnnotateKernelFeatures::addAttrToCallers(Function *Intrin,
143 StringRef AttrName) {
144 SmallPtrSet<Function *, 4> SeenFuncs;
145
146 for (User *U : Intrin->users()) {
147 // CallInst is the only valid user for an intrinsic.
148 CallInst *CI = cast<CallInst>(U);
149
150 Function *CallingFunction = CI->getParent()->getParent();
151 if (SeenFuncs.insert(CallingFunction).second)
152 CallingFunction->addFnAttr(AttrName);
153 }
154}
155
156bool AMDGPUAnnotateKernelFeatures::addAttrsForIntrinsics(
157 Module &M,
158 ArrayRef<StringRef[2]> IntrinsicToAttr) {
159 bool Changed = false;
160
161 for (const StringRef *Arr : IntrinsicToAttr) {
162 if (Function *Fn = M.getFunction(Arr[0])) {
163 addAttrToCallers(Fn, Arr[1]);
164 Changed = true;
165 }
166 }
167
168 return Changed;
169}
170
171bool AMDGPUAnnotateKernelFeatures::runOnModule(Module &M) {
172 Triple TT(M.getTargetTriple());
173
174 static const StringRef IntrinsicToAttr[][2] = {
175 // .x omitted
Matt Arsenault43976df2016-01-30 04:25:19 +0000176 { "llvm.amdgcn.workitem.id.y", "amdgpu-work-item-id-y" },
177 { "llvm.amdgcn.workitem.id.z", "amdgpu-work-item-id-z" },
178
179 { "llvm.amdgcn.workgroup.id.y", "amdgpu-work-group-id-y" },
180 { "llvm.amdgcn.workgroup.id.z", "amdgpu-work-group-id-z" },
181
Matt Arsenault39319482015-11-06 18:01:57 +0000182 { "llvm.r600.read.tgid.y", "amdgpu-work-group-id-y" },
183 { "llvm.r600.read.tgid.z", "amdgpu-work-group-id-z" },
184
185 // .x omitted
186 { "llvm.r600.read.tidig.y", "amdgpu-work-item-id-y" },
187 { "llvm.r600.read.tidig.z", "amdgpu-work-item-id-z" }
Matt Arsenault39319482015-11-06 18:01:57 +0000188 };
189
190 static const StringRef HSAIntrinsicToAttr[][2] = {
Matt Arsenault48ab5262016-04-25 19:27:18 +0000191 { "llvm.amdgcn.dispatch.ptr", "amdgpu-dispatch-ptr" },
Matt Arsenault8d718dc2016-07-22 17:01:30 +0000192 { "llvm.amdgcn.queue.ptr", "amdgpu-queue-ptr" },
Wei Dingee21a362017-01-24 06:41:21 +0000193 { "llvm.amdgcn.dispatch.id", "amdgpu-dispatch-id" },
Wei Ding205bfdb2017-02-10 02:15:29 +0000194 { "llvm.trap", "amdgpu-queue-ptr" },
195 { "llvm.debugtrap", "amdgpu-queue-ptr" }
Matt Arsenault39319482015-11-06 18:01:57 +0000196 };
197
Matt Arsenaultd0799df2016-01-30 05:10:59 +0000198 // TODO: We should not add the attributes if the known compile time workgroup
199 // size is 1 for y/z.
200
Matt Arsenault39319482015-11-06 18:01:57 +0000201 // TODO: Intrinsics that require queue ptr.
202
203 // We do not need to note the x workitem or workgroup id because they are
204 // always initialized.
205
206 bool Changed = addAttrsForIntrinsics(M, IntrinsicToAttr);
Tom Stellard0b76fc4c2016-09-16 21:34:26 +0000207 if (TT.getOS() == Triple::AMDHSA || TT.getOS() == Triple::Mesa3D) {
Matt Arsenault39319482015-11-06 18:01:57 +0000208 Changed |= addAttrsForIntrinsics(M, HSAIntrinsicToAttr);
209
Matt Arsenault99c14522016-04-25 19:27:24 +0000210 for (Function &F : M) {
211 if (F.hasFnAttribute("amdgpu-queue-ptr"))
212 continue;
213
214 if (hasAddrSpaceCast(F))
215 F.addFnAttr("amdgpu-queue-ptr");
216 }
217 }
218
Matt Arsenault39319482015-11-06 18:01:57 +0000219 return Changed;
220}
221
222ModulePass *llvm::createAMDGPUAnnotateKernelFeaturesPass() {
223 return new AMDGPUAnnotateKernelFeatures();
224}