//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for CUDA code generation targeting the NVIDIA CUDA
// runtime library.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/Cuda.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Format.h"

using namespace clang;
using namespace CodeGen;

namespace {
constexpr unsigned CudaFatMagic = 0x466243b1;
constexpr unsigned HIPFatMagic = 0x48495046; // "HIPF"

class CGNVCUDARuntime : public CGCUDARuntime {

private:
  llvm::IntegerType *IntTy, *SizeTy;
  llvm::Type *VoidTy;
  llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy;

  /// Convenience reference to LLVM Context
  llvm::LLVMContext &Context;
  /// Convenience reference to the current module
  llvm::Module &TheModule;
  /// Keeps track of kernel launch stubs emitted in this module
  struct KernelInfo {
    llvm::Function *Kernel;
    const Decl *D;
  };
  llvm::SmallVector<KernelInfo, 16> EmittedKernels;
  struct VarInfo {
    llvm::GlobalVariable *Var;
    const VarDecl *D;
    unsigned Flag;
  };
  llvm::SmallVector<VarInfo, 16> DeviceVars;
  /// Keeps track of variable containing handle of GPU binary. Populated by
  /// ModuleCtorFunction() and used to create corresponding cleanup calls in
  /// ModuleDtorFunction()
  llvm::GlobalVariable *GpuBinaryHandle = nullptr;
  /// Whether we generate relocatable device code.
  bool RelocatableDeviceCode;
  /// Mangle context for device.
  std::unique_ptr<MangleContext> DeviceMC;

  llvm::FunctionCallee getSetupArgumentFn() const;
  llvm::FunctionCallee getLaunchFn() const;

  llvm::FunctionType *getRegisterGlobalsFnTy() const;
  llvm::FunctionType *getCallbackFnTy() const;
  llvm::FunctionType *getRegisterLinkedBinaryFnTy() const;
  std::string addPrefixToName(StringRef FuncName) const;
  std::string addUnderscoredPrefixToName(StringRef FuncName) const;

  /// Creates a function to register all kernel stubs generated in this module.
  llvm::Function *makeRegisterGlobalsFn();

  /// Helper function that generates a constant string and returns a pointer to
  /// the start of the string. The result of this function can be used anywhere
  /// where the C code specifies const char*.
  llvm::Constant *makeConstantString(const std::string &Str,
                                     const std::string &Name = "",
                                     const std::string &SectionName = "",
                                     unsigned Alignment = 0) {
    llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
                               llvm::ConstantInt::get(SizeTy, 0)};
    auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
    llvm::GlobalVariable *GV =
        cast<llvm::GlobalVariable>(ConstStr.getPointer());
    if (!SectionName.empty()) {
      GV->setSection(SectionName);
      // Mark the address as used to make sure that this section isn't
      // merged away and that we will really have it in the object file.
      GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::None);
    }
    if (Alignment)
      GV->setAlignment(Alignment);

    return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
                                                ConstStr.getPointer(), Zeros);
  }

  /// Helper function that generates an empty dummy function returning void.
  llvm::Function *makeDummyFunction(llvm::FunctionType *FnTy) {
    assert(FnTy->getReturnType()->isVoidTy() &&
           "Can only generate dummy functions returning void!");
    llvm::Function *DummyFunc = llvm::Function::Create(
        FnTy, llvm::GlobalValue::InternalLinkage, "dummy", &TheModule);

    llvm::BasicBlock *DummyBlock =
        llvm::BasicBlock::Create(Context, "", DummyFunc);
    CGBuilderTy FuncBuilder(CGM, Context);
    FuncBuilder.SetInsertPoint(DummyBlock);
    FuncBuilder.CreateRetVoid();

    return DummyFunc;
  }

  void emitDeviceStubBodyLegacy(CodeGenFunction &CGF, FunctionArgList &Args);
  void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
  std::string getDeviceSideName(const Decl *ND);

public:
  CGNVCUDARuntime(CodeGenModule &CGM);

  void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
  void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
                         unsigned Flags) override {
    DeviceVars.push_back({&Var, VD, Flags});
  }

  /// Creates module constructor function
  llvm::Function *makeModuleCtorFunction() override;
  /// Creates module destructor function
  llvm::Function *makeModuleDtorFunction() override;
};

}

std::string CGNVCUDARuntime::addPrefixToName(StringRef FuncName) const {
  if (CGM.getLangOpts().HIP)
    return ((Twine("hip") + Twine(FuncName)).str());
  return ((Twine("cuda") + Twine(FuncName)).str());
}
std::string
CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
  if (CGM.getLangOpts().HIP)
    return ((Twine("__hip") + Twine(FuncName)).str());
  return ((Twine("__cuda") + Twine(FuncName)).str());
}

CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
    : CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
      TheModule(CGM.getModule()),
      RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
      DeviceMC(CGM.getContext().createMangleContext(
          CGM.getContext().getAuxTargetInfo())) {
  CodeGen::CodeGenTypes &Types = CGM.getTypes();
  ASTContext &Ctx = CGM.getContext();

  IntTy = CGM.IntTy;
  SizeTy = CGM.SizeTy;
  VoidTy = CGM.VoidTy;

  CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
  VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
  VoidPtrPtrTy = VoidPtrTy->getPointerTo();
}

llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
  // cudaError_t cudaSetupArgument(void *, size_t, size_t)
  llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
  return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, Params, false),
      addPrefixToName("SetupArgument"));
}

llvm::FunctionCallee CGNVCUDARuntime::getLaunchFn() const {
  if (CGM.getLangOpts().HIP) {
    // hipError_t hipLaunchByPtr(char *);
    return CGM.CreateRuntimeFunction(
        llvm::FunctionType::get(IntTy, CharPtrTy, false), "hipLaunchByPtr");
  } else {
    // cudaError_t cudaLaunch(char *);
    return CGM.CreateRuntimeFunction(
        llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
  }
}

llvm::FunctionType *CGNVCUDARuntime::getRegisterGlobalsFnTy() const {
  return llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false);
}

llvm::FunctionType *CGNVCUDARuntime::getCallbackFnTy() const {
  return llvm::FunctionType::get(VoidTy, VoidPtrTy, false);
}

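/// Returns the type used to declare the __cudaRegisterLinkedBinary_<ModuleID>
/// entry point that makeModuleCtorFunction() calls when generating relocatable
/// device code: it receives the __cuda_register_globals-style registration
/// function, the fatbin wrapper, the module ID string, and one more callback
/// (for which a dummy function is currently passed).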
llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
  auto CallbackFnTy = getCallbackFnTy();
  auto RegisterGlobalsFnTy = getRegisterGlobalsFnTy();
  llvm::Type *Params[] = {RegisterGlobalsFnTy->getPointerTo(), VoidPtrTy,
                          VoidPtrTy, CallbackFnTy->getPointerTo()};
  return llvm::FunctionType::get(VoidTy, Params, false);
}

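/// Returns the device-side name of the given declaration, mangled with the
/// mangle context built from the auxiliary target. When host and device use
/// different C++ ABIs (e.g. a Microsoft host ABI and an Itanium device ABI),
/// this may differ from the host-side mangled name of the same declaration.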
std::string CGNVCUDARuntime::getDeviceSideName(const Decl *D) {
  auto *ND = cast<const NamedDecl>(D);
  std::string DeviceSideName;
  if (DeviceMC->shouldMangleDeclName(ND)) {
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    DeviceMC->mangleName(ND, Out);
    DeviceSideName = Out.str();
  } else
    DeviceSideName = ND->getIdentifier()->getName();
  return DeviceSideName;
}

void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
                                     FunctionArgList &Args) {
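  // The stub is expected to carry the device-side name of the kernel
  // (possibly with a ".stub" suffix); the names may only differ when the host
  // and device targets use different C++ ABIs, which is what the assert below
  // checks.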
  assert(getDeviceSideName(CGF.CurFuncDecl) == CGF.CurFn->getName() ||
         getDeviceSideName(CGF.CurFuncDecl) + ".stub" == CGF.CurFn->getName() ||
         CGF.CGM.getContext().getTargetInfo().getCXXABI() !=
             CGF.CGM.getContext().getAuxTargetInfo()->getCXXABI());

  EmittedKernels.push_back({CGF.CurFn, CGF.CurFuncDecl});
  if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                         CudaFeature::CUDA_USES_NEW_LAUNCH))
    emitDeviceStubBodyNew(CGF, Args);
  else
    emitDeviceStubBodyLegacy(CGF, Args);
}

// CUDA 9.0+ uses a new way to launch kernels. Parameters are packed in a local
// array and the kernel is launched using cudaLaunchKernel().
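//
// As a rough sketch (not the exact emitted IR, and with illustrative names),
// the stub generated for `__global__ void kernel(T0 a0, ..., Tn an)` behaves
// like:
// \code
// void kernel_stub(T0 a0, ..., Tn an) {
//   void *kernel_args[] = {&a0, ..., &an};
//   dim3 grid_dim, block_dim;
//   size_t shmem_size;
//   cudaStream_t stream;
//   __cudaPopCallConfiguration(&grid_dim, &block_dim, &shmem_size, &stream);
//   cudaLaunchKernel((void *)kernel_stub, grid_dim, block_dim, kernel_args,
//                    shmem_size, stream);
// }
// \endcode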
void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
                                            FunctionArgList &Args) {
  // Build the shadow stack entry at the very start of the function.

  // Calculate amount of space we will need for all arguments. If we have no
  // args, allocate a single pointer so we still have a valid pointer to the
  // argument array that we can pass to runtime, even if it will be unused.
  Address KernelArgs = CGF.CreateTempAlloca(
      VoidPtrTy, CharUnits::fromQuantity(16), "kernel_args",
      llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
  // Store pointers to the arguments in a locally allocated launch_args.
  for (unsigned i = 0; i < Args.size(); ++i) {
    llvm::Value* VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
    llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, VoidPtrTy);
    CGF.Builder.CreateDefaultAlignedStore(
        VoidVarPtr, CGF.Builder.CreateConstGEP1_32(KernelArgs.getPointer(), i));
  }

  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");

  // Lookup cudaLaunchKernel function.
  // cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
  //                              void **args, size_t sharedMem,
  //                              cudaStream_t stream);
  TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
  IdentifierInfo &cudaLaunchKernelII =
      CGM.getContext().Idents.get("cudaLaunchKernel");
  FunctionDecl *cudaLaunchKernelFD = nullptr;
  for (const auto &Result : DC->lookup(&cudaLaunchKernelII)) {
    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Result))
      cudaLaunchKernelFD = FD;
  }

  if (cudaLaunchKernelFD == nullptr) {
    CGM.Error(CGF.CurFuncDecl->getLocation(),
              "Can't find declaration for cudaLaunchKernel()");
    return;
  }
  // Create temporary dim3 grid_dim, block_dim.
  ParmVarDecl *GridDimParam = cudaLaunchKernelFD->getParamDecl(1);
  QualType Dim3Ty = GridDimParam->getType();
  Address GridDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "grid_dim");
  Address BlockDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "block_dim");
  Address ShmemSize =
      CGF.CreateTempAlloca(SizeTy, CGM.getSizeAlign(), "shmem_size");
  Address Stream =
      CGF.CreateTempAlloca(VoidPtrTy, CGM.getPointerAlign(), "stream");
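  // Retrieve the launch configuration (grid/block dimensions, dynamic shared
  // memory size and stream) that the kernel-call expression pushed, via
  // __cudaPushCallConfiguration, before invoking this stub.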
  llvm::FunctionCallee cudaPopConfigFn = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy,
                              {/*gridDim=*/GridDim.getType(),
                               /*blockDim=*/BlockDim.getType(),
                               /*ShmemSize=*/ShmemSize.getType(),
                               /*Stream=*/Stream.getType()},
                              /*isVarArg=*/false),
      "__cudaPopCallConfiguration");

  CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn,
                              {GridDim.getPointer(), BlockDim.getPointer(),
                               ShmemSize.getPointer(), Stream.getPointer()});

  // Emit the call to cudaLaunchKernel.
  llvm::Value *Kernel = CGF.Builder.CreatePointerCast(CGF.CurFn, VoidPtrTy);
  CallArgList LaunchKernelArgs;
  LaunchKernelArgs.add(RValue::get(Kernel),
                       cudaLaunchKernelFD->getParamDecl(0)->getType());
  LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()),
                       cudaLaunchKernelFD->getParamDecl(3)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)),
                       cudaLaunchKernelFD->getParamDecl(4)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(Stream)),
                       cudaLaunchKernelFD->getParamDecl(5)->getType());

  QualType QT = cudaLaunchKernelFD->getType();
  QualType CQT = QT.getCanonicalType();
  llvm::Type *Ty = CGM.getTypes().ConvertType(CQT);
  llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(Ty);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeFunctionDeclaration(cudaLaunchKernelFD);
  llvm::FunctionCallee cudaLaunchKernelFn =
      CGM.CreateRuntimeFunction(FTy, "cudaLaunchKernel");
  CGF.EmitCall(FI, CGCallee::forDirect(cudaLaunchKernelFn), ReturnValueSlot(),
               LaunchKernelArgs);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}

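// Legacy launch path, used when the SDK does not support the newer
// cudaLaunchKernel()-based API: each argument is copied into the pending
// launch with cudaSetupArgument() (hipSetupArgument for HIP) and the kernel
// is then launched through cudaLaunch()/hipLaunchByPtr(). A rough sketch of
// the generated stub (illustrative names, not the exact emitted IR):
// \code
// void kernel_stub(T0 a0, ..., Tn an) {
//   if (cudaSetupArgument(&a0, sizeof(a0), offset0) != 0) return;
//   ...
//   if (cudaSetupArgument(&an, sizeof(an), offsetN) != 0) return;
//   cudaLaunch((char *)kernel_stub);
// }
// \endcode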
void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
                                               FunctionArgList &Args) {
  // Emit a call to cudaSetupArgument for each arg in Args.
  llvm::FunctionCallee cudaSetupArgFn = getSetupArgumentFn();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
  CharUnits Offset = CharUnits::Zero();
  for (const VarDecl *A : Args) {
    CharUnits TyWidth, TyAlign;
    std::tie(TyWidth, TyAlign) =
        CGM.getContext().getTypeInfoInChars(A->getType());
    Offset = Offset.alignTo(TyAlign);
    llvm::Value *Args[] = {
        CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
                                      VoidPtrTy),
        llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
        llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
    };
    llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
    llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
    llvm::Value *CBZero = CGF.Builder.CreateICmpEQ(CB, Zero);
    llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
    CGF.Builder.CreateCondBr(CBZero, NextBlock, EndBlock);
    CGF.EmitBlock(NextBlock);
    Offset += TyWidth;
  }

  // Emit the call to cudaLaunch
  llvm::FunctionCallee cudaLaunchFn = getLaunchFn();
  llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
  CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}

/// Creates a function that sets up state on the host side for CUDA objects that
/// have a presence on both the host and device sides. Specifically, registers
/// the host side of kernel functions and device global variables with the CUDA
/// runtime.
/// \code
/// void __cuda_register_globals(void** GpuBinaryHandle) {
///   __cudaRegisterFunction(GpuBinaryHandle,Kernel0,...);
///   ...
///   __cudaRegisterFunction(GpuBinaryHandle,KernelM,...);
///   __cudaRegisterVar(GpuBinaryHandle, GlobalVar0, ...);
///   ...
///   __cudaRegisterVar(GpuBinaryHandle, GlobalVarN, ...);
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
  // No need to register anything
  if (EmittedKernels.empty() && DeviceVars.empty())
    return nullptr;

  llvm::Function *RegisterKernelsFunc = llvm::Function::Create(
      getRegisterGlobalsFnTy(), llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_register_globals"), &TheModule);
  llvm::BasicBlock *EntryBB =
      llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
  CGBuilderTy Builder(CGM, Context);
  Builder.SetInsertPoint(EntryBB);

  // void __cudaRegisterFunction(void **, const char *, char *, const char *,
  //                             int, uint3*, uint3*, dim3*, dim3*, int*)
  llvm::Type *RegisterFuncParams[] = {
      VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy,
      VoidPtrTy,    VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
  llvm::FunctionCallee RegisterFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
      addUnderscoredPrefixToName("RegisterFunction"));

399 // Extract GpuBinaryHandle passed as the first argument passed to
Artem Belevich42e19492016-03-02 18:28:50 +0000400 // __cuda_register_globals() and generate __cudaRegisterFunction() call for
Artem Belevich52cc4872015-05-07 19:34:16 +0000401 // each emitted kernel.
  llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin();
  for (auto &&I : EmittedKernels) {
    llvm::Constant *KernelName = makeConstantString(getDeviceSideName(I.D));
    llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
    llvm::Value *Args[] = {
        &GpuBinaryHandlePtr,
        Builder.CreateBitCast(I.Kernel, VoidPtrTy),
        KernelName,
        KernelName,
        llvm::ConstantInt::get(IntTy, -1),
        NullPtr,
        NullPtr,
        NullPtr,
        NullPtr,
        llvm::ConstantPointerNull::get(IntTy->getPointerTo())};
    Builder.CreateCall(RegisterFunc, Args);
  }

  // void __cudaRegisterVar(void **, char *, char *, const char *,
  //                        int, int, int, int)
  llvm::Type *RegisterVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
                                     CharPtrTy,    IntTy,     IntTy,
                                     IntTy,        IntTy};
  llvm::FunctionCallee RegisterVar = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, RegisterVarParams, false),
      addUnderscoredPrefixToName("RegisterVar"));
  for (auto &&Info : DeviceVars) {
    llvm::GlobalVariable *Var = Info.Var;
    unsigned Flags = Info.Flag;
    llvm::Constant *VarName = makeConstantString(getDeviceSideName(Info.D));
    uint64_t VarSize =
        CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
    llvm::Value *Args[] = {
        &GpuBinaryHandlePtr,
        Builder.CreateBitCast(Var, VoidPtrTy),
        VarName,
        VarName,
        llvm::ConstantInt::get(IntTy, (Flags & ExternDeviceVar) ? 1 : 0),
        llvm::ConstantInt::get(IntTy, VarSize),
        llvm::ConstantInt::get(IntTy, (Flags & ConstantDeviceVar) ? 1 : 0),
        llvm::ConstantInt::get(IntTy, 0)};
    Builder.CreateCall(RegisterVar, Args);
  }

  Builder.CreateRetVoid();
  return RegisterKernelsFunc;
}

/// Creates a global constructor function for the module:
///
/// For CUDA:
/// \code
/// void __cuda_module_ctor(void*) {
///     Handle = __cudaRegisterFatBinary(GpuBinaryBlob);
///     __cuda_register_globals(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
/// void __hip_module_ctor(void*) {
///     if (__hip_gpubin_handle == 0) {
///         __hip_gpubin_handle = __hipRegisterFatBinary(GpuBinaryBlob);
///         __hip_register_globals(__hip_gpubin_handle);
///     }
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
  bool IsHIP = CGM.getLangOpts().HIP;
  bool IsCUDA = CGM.getLangOpts().CUDA;
  // No need to generate ctors/dtors if there is no GPU binary.
  StringRef CudaGpuBinaryFileName = CGM.getCodeGenOpts().CudaGpuBinaryFileName;
  if (CudaGpuBinaryFileName.empty() && !IsHIP)
    return nullptr;
  if ((IsHIP || (IsCUDA && !RelocatableDeviceCode)) && EmittedKernels.empty() &&
      DeviceVars.empty())
    return nullptr;

  // void __{cuda|hip}_register_globals(void* handle);
  llvm::Function *RegisterGlobalsFunc = makeRegisterGlobalsFn();
  // We always need a function to pass in as callback. Create a dummy
  // implementation if we don't need to register anything.
  if (RelocatableDeviceCode && !RegisterGlobalsFunc)
    RegisterGlobalsFunc = makeDummyFunction(getRegisterGlobalsFnTy());

  // void ** __{cuda|hip}RegisterFatBinary(void *);
  llvm::FunctionCallee RegisterFatbinFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
      addUnderscoredPrefixToName("RegisterFatBinary"));
  // struct { int magic, int version, void * gpu_binary, void * dont_care };
  llvm::StructType *FatbinWrapperTy =
      llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy);

  // Register the GPU binary with the CUDA runtime, store the returned handle
  // in a global variable and save a reference in GpuBinaryHandle to be cleaned
  // up in the destructor on exit. Then associate all known kernels with the
  // GPU binary handle so the CUDA runtime can figure out what to call on the
  // GPU side.
  std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary = nullptr;
  if (!CudaGpuBinaryFileName.empty()) {
    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CudaGpuBinaryOrErr =
        llvm::MemoryBuffer::getFileOrSTDIN(CudaGpuBinaryFileName);
    if (std::error_code EC = CudaGpuBinaryOrErr.getError()) {
      CGM.getDiags().Report(diag::err_cannot_open_file)
          << CudaGpuBinaryFileName << EC.message();
      return nullptr;
    }
    CudaGpuBinary = std::move(CudaGpuBinaryOrErr.get());
  }

  llvm::Function *ModuleCtorFunc = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
      llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_module_ctor"), &TheModule);
  llvm::BasicBlock *CtorEntryBB =
      llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
  CGBuilderTy CtorBuilder(CGM, Context);

  CtorBuilder.SetInsertPoint(CtorEntryBB);

  const char *FatbinConstantName;
  const char *FatbinSectionName;
  const char *ModuleIDSectionName;
  StringRef ModuleIDPrefix;
  llvm::Constant *FatBinStr;
  unsigned FatMagic;
  if (IsHIP) {
    FatbinConstantName = ".hip_fatbin";
    FatbinSectionName = ".hipFatBinSegment";

    ModuleIDSectionName = "__hip_module_id";
    ModuleIDPrefix = "__hip_";

    if (CudaGpuBinary) {
      // If fatbin is available from early finalization, create a string
      // literal containing the fat binary loaded from the given file.
      FatBinStr = makeConstantString(CudaGpuBinary->getBuffer(), "",
                                     FatbinConstantName, 8);
    } else {
      // If the fatbin is not available, create an external symbol
      // __hip_fatbin in section .hip_fatbin. The external symbol is supposed
      // to contain the fat binary but will be populated somewhere else,
      // e.g. by lld through a link script.
      FatBinStr = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty,
          /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
          "__hip_fatbin", nullptr,
          llvm::GlobalVariable::NotThreadLocal);
      cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
    }

    FatMagic = HIPFatMagic;
  } else {
    if (RelocatableDeviceCode)
      FatbinConstantName = CGM.getTriple().isMacOSX()
                               ? "__NV_CUDA,__nv_relfatbin"
                               : "__nv_relfatbin";
    else
      FatbinConstantName =
          CGM.getTriple().isMacOSX() ? "__NV_CUDA,__nv_fatbin" : ".nv_fatbin";
    // NVIDIA's cuobjdump looks for fatbins in this section.
    FatbinSectionName =
        CGM.getTriple().isMacOSX() ? "__NV_CUDA,__fatbin" : ".nvFatBinSegment";

    ModuleIDSectionName = CGM.getTriple().isMacOSX()
                              ? "__NV_CUDA,__nv_module_id"
                              : "__nv_module_id";
    ModuleIDPrefix = "__nv_";

    // For CUDA, create a string literal containing the fat binary loaded from
    // the given file.
    FatBinStr = makeConstantString(CudaGpuBinary->getBuffer(), "",
                                   FatbinConstantName, 8);
    FatMagic = CudaFatMagic;
  }

  // Create initialized wrapper structure that points to the loaded GPU binary
  ConstantInitBuilder Builder(CGM);
  auto Values = Builder.beginStruct(FatbinWrapperTy);
  // Fatbin wrapper magic.
  Values.addInt(IntTy, FatMagic);
  // Fatbin version.
  Values.addInt(IntTy, 1);
  // Data.
  Values.add(FatBinStr);
  // Unused in fatbin v1.
  Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
  llvm::GlobalVariable *FatbinWrapper = Values.finishAndCreateGlobal(
      addUnderscoredPrefixToName("_fatbin_wrapper"), CGM.getPointerAlign(),
      /*constant*/ true);
  FatbinWrapper->setSection(FatbinSectionName);

  // There is only one HIP fat binary per linked module, but there are
  // multiple constructor functions. Make sure the fat binary is registered
  // only once. The constructor functions are executed by the dynamic loader
  // before the program gains control. The dynamic loader cannot execute the
  // constructor functions concurrently since doing that would not guarantee
  // thread safety of the loaded program. Therefore we can assume sequential
  // execution of constructor functions here.
  if (IsHIP) {
    auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage :
                                   llvm::GlobalValue::LinkOnceAnyLinkage;
    llvm::BasicBlock *IfBlock =
        llvm::BasicBlock::Create(Context, "if", ModuleCtorFunc);
    llvm::BasicBlock *ExitBlock =
        llvm::BasicBlock::Create(Context, "exit", ModuleCtorFunc);
    // The name, size, and initialization pattern of this variable are part
    // of the HIP ABI.
    GpuBinaryHandle = new llvm::GlobalVariable(
        TheModule, VoidPtrPtrTy, /*isConstant=*/false,
        Linkage,
        /*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
        "__hip_gpubin_handle");
    GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getQuantity());
    // Prevent the weak symbol in different shared libraries from being merged.
    if (Linkage != llvm::GlobalValue::InternalLinkage)
      GpuBinaryHandle->setVisibility(llvm::GlobalValue::HiddenVisibility);
    Address GpuBinaryAddr(
        GpuBinaryHandle,
        CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
    {
      auto HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
      llvm::Constant *Zero =
          llvm::Constant::getNullValue(HandleValue->getType());
      llvm::Value *EQZero = CtorBuilder.CreateICmpEQ(HandleValue, Zero);
      CtorBuilder.CreateCondBr(EQZero, IfBlock, ExitBlock);
    }
    {
      CtorBuilder.SetInsertPoint(IfBlock);
      // GpuBinaryHandle = __hipRegisterFatBinary(&FatbinWrapper);
      llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
          RegisterFatbinFunc,
          CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
      CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryAddr);
      CtorBuilder.CreateBr(ExitBlock);
    }
    {
      CtorBuilder.SetInsertPoint(ExitBlock);
      // Call __hip_register_globals(GpuBinaryHandle);
      if (RegisterGlobalsFunc) {
        auto HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
        CtorBuilder.CreateCall(RegisterGlobalsFunc, HandleValue);
      }
    }
  } else if (!RelocatableDeviceCode) {
    // Register binary with CUDA runtime. This is substantially different in
    // default mode vs. separate compilation!
    // GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
    llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
        RegisterFatbinFunc,
        CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
    GpuBinaryHandle = new llvm::GlobalVariable(
        TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
    GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getQuantity());
    CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
                                   CGM.getPointerAlign());

    // Call __cuda_register_globals(GpuBinaryHandle);
    if (RegisterGlobalsFunc)
      CtorBuilder.CreateCall(RegisterGlobalsFunc, RegisterFatbinCall);

    // Call __cudaRegisterFatBinaryEnd(Handle) if this CUDA version needs it.
    if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                           CudaFeature::CUDA_USES_FATBIN_REGISTER_END)) {
      // void __cudaRegisterFatBinaryEnd(void **);
      llvm::FunctionCallee RegisterFatbinEndFunc = CGM.CreateRuntimeFunction(
          llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
          "__cudaRegisterFatBinaryEnd");
      CtorBuilder.CreateCall(RegisterFatbinEndFunc, RegisterFatbinCall);
    }
  } else {
    // Generate a unique module ID.
    SmallString<64> ModuleID;
    llvm::raw_svector_ostream OS(ModuleID);
    OS << ModuleIDPrefix << llvm::format("%" PRIx64, FatbinWrapper->getGUID());
    llvm::Constant *ModuleIDConstant =
        makeConstantString(ModuleID.str(), "", ModuleIDSectionName, 32);

    // Create an alias for the FatbinWrapper that nvcc will look for.
    llvm::GlobalAlias::create(llvm::GlobalValue::ExternalLinkage,
                              Twine("__fatbinwrap") + ModuleID, FatbinWrapper);

    // void __cudaRegisterLinkedBinary%ModuleID%(void (*)(void *), void *,
    // void *, void (*)(void **))
    SmallString<128> RegisterLinkedBinaryName("__cudaRegisterLinkedBinary");
    RegisterLinkedBinaryName += ModuleID;
    llvm::FunctionCallee RegisterLinkedBinaryFunc = CGM.CreateRuntimeFunction(
        getRegisterLinkedBinaryFnTy(), RegisterLinkedBinaryName);

    assert(RegisterGlobalsFunc && "Expecting at least dummy function!");
    llvm::Value *Args[] = {RegisterGlobalsFunc,
                           CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy),
                           ModuleIDConstant,
                           makeDummyFunction(getCallbackFnTy())};
    CtorBuilder.CreateCall(RegisterLinkedBinaryFunc, Args);
  }

  // Create a destructor and register it with atexit() the way NVCC does it.
  // Doing it during the regular destructor phase worked in CUDA before 9.2 but
  // results in a double free in 9.2.
  if (llvm::Function *CleanupFn = makeModuleDtorFunction()) {
    // extern "C" int atexit(void (*f)(void));
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(IntTy, CleanupFn->getType(), false);
    llvm::FunctionCallee AtExitFunc =
        CGM.CreateRuntimeFunction(AtExitTy, "atexit", llvm::AttributeList(),
                                  /*Local=*/true);
    CtorBuilder.CreateCall(AtExitFunc, CleanupFn);
  }

  CtorBuilder.CreateRetVoid();
  return ModuleCtorFunc;
}

/// Creates a global destructor function that unregisters the GPU code blob
/// registered by constructor.
///
/// For CUDA:
/// \code
/// void __cuda_module_dtor(void*) {
///     __cudaUnregisterFatBinary(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
/// void __hip_module_dtor(void*) {
///     if (__hip_gpubin_handle) {
///         __hipUnregisterFatBinary(__hip_gpubin_handle);
///         __hip_gpubin_handle = 0;
///     }
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
  // No need for destructor if we don't have a handle to unregister.
  if (!GpuBinaryHandle)
    return nullptr;

  // void __cudaUnregisterFatBinary(void ** handle);
  llvm::FunctionCallee UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
      addUnderscoredPrefixToName("UnregisterFatBinary"));

  llvm::Function *ModuleDtorFunc = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
      llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_module_dtor"), &TheModule);

  llvm::BasicBlock *DtorEntryBB =
      llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
  CGBuilderTy DtorBuilder(CGM, Context);
  DtorBuilder.SetInsertPoint(DtorEntryBB);

  Address GpuBinaryAddr(GpuBinaryHandle, CharUnits::fromQuantity(
                                             GpuBinaryHandle->getAlignment()));
  auto HandleValue = DtorBuilder.CreateLoad(GpuBinaryAddr);
  // There is only one HIP fat binary per linked module, but there are
  // multiple destructor functions. Make sure the fat binary is unregistered
  // only once.
  if (CGM.getLangOpts().HIP) {
    llvm::BasicBlock *IfBlock =
        llvm::BasicBlock::Create(Context, "if", ModuleDtorFunc);
    llvm::BasicBlock *ExitBlock =
        llvm::BasicBlock::Create(Context, "exit", ModuleDtorFunc);
    llvm::Constant *Zero = llvm::Constant::getNullValue(HandleValue->getType());
    llvm::Value *NEZero = DtorBuilder.CreateICmpNE(HandleValue, Zero);
    DtorBuilder.CreateCondBr(NEZero, IfBlock, ExitBlock);

    DtorBuilder.SetInsertPoint(IfBlock);
    DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
    DtorBuilder.CreateStore(Zero, GpuBinaryAddr);
    DtorBuilder.CreateBr(ExitBlock);

    DtorBuilder.SetInsertPoint(ExitBlock);
  } else {
    DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
  }
  DtorBuilder.CreateRetVoid();
  return ModuleDtorFunc;
}

CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
  return new CGNVCUDARuntime(CGM);
}