//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for CUDA code generation targeting the NVIDIA CUDA
// runtime library.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/Cuda.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Format.h"

using namespace clang;
using namespace CodeGen;

namespace {
constexpr unsigned CudaFatMagic = 0x466243b1;
constexpr unsigned HIPFatMagic = 0x48495046; // "HIPF"

class CGNVCUDARuntime : public CGCUDARuntime {

private:
  llvm::IntegerType *IntTy, *SizeTy;
  llvm::Type *VoidTy;
  llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy;

  /// Convenience reference to LLVM Context
  llvm::LLVMContext &Context;
  /// Convenience reference to the current module
  llvm::Module &TheModule;
  /// Keeps track of kernel launch stubs emitted in this module
  struct KernelInfo {
    llvm::Function *Kernel;
    const Decl *D;
  };
  llvm::SmallVector<KernelInfo, 16> EmittedKernels;
  struct VarInfo {
    llvm::GlobalVariable *Var;
    const VarDecl *D;
    unsigned Flag;
  };
  llvm::SmallVector<VarInfo, 16> DeviceVars;
  /// Keeps track of variable containing handle of GPU binary. Populated by
  /// ModuleCtorFunction() and used to create corresponding cleanup calls in
  /// ModuleDtorFunction()
  llvm::GlobalVariable *GpuBinaryHandle = nullptr;
  /// Whether we generate relocatable device code.
  bool RelocatableDeviceCode;
  /// Mangle context for device.
  std::unique_ptr<MangleContext> DeviceMC;

  llvm::FunctionCallee getSetupArgumentFn() const;
  llvm::FunctionCallee getLaunchFn() const;

  llvm::FunctionType *getRegisterGlobalsFnTy() const;
  llvm::FunctionType *getCallbackFnTy() const;
  llvm::FunctionType *getRegisterLinkedBinaryFnTy() const;
  std::string addPrefixToName(StringRef FuncName) const;
  std::string addUnderscoredPrefixToName(StringRef FuncName) const;

  /// Creates a function to register all kernel stubs generated in this module.
  llvm::Function *makeRegisterGlobalsFn();

  /// Helper function that generates a constant string and returns a pointer to
  /// the start of the string. The result of this function can be used anywhere
  /// where the C code specifies const char*.
  llvm::Constant *makeConstantString(const std::string &Str,
                                     const std::string &Name = "",
                                     const std::string &SectionName = "",
                                     unsigned Alignment = 0) {
    llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
                               llvm::ConstantInt::get(SizeTy, 0)};
    auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
    llvm::GlobalVariable *GV =
        cast<llvm::GlobalVariable>(ConstStr.getPointer());
    if (!SectionName.empty()) {
      GV->setSection(SectionName);
      // Mark the address as used so that this section isn't merged and we
      // really end up with it in the object file.
      GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::None);
    }
    if (Alignment)
      GV->setAlignment(llvm::Align(Alignment));

    return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
                                                ConstStr.getPointer(), Zeros);
  }

  /// Helper function that generates an empty dummy function returning void.
  llvm::Function *makeDummyFunction(llvm::FunctionType *FnTy) {
    assert(FnTy->getReturnType()->isVoidTy() &&
           "Can only generate dummy functions returning void!");
    llvm::Function *DummyFunc = llvm::Function::Create(
        FnTy, llvm::GlobalValue::InternalLinkage, "dummy", &TheModule);

    llvm::BasicBlock *DummyBlock =
        llvm::BasicBlock::Create(Context, "", DummyFunc);
    CGBuilderTy FuncBuilder(CGM, Context);
    FuncBuilder.SetInsertPoint(DummyBlock);
    FuncBuilder.CreateRetVoid();

    return DummyFunc;
  }

  void emitDeviceStubBodyLegacy(CodeGenFunction &CGF, FunctionArgList &Args);
  void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
  std::string getDeviceSideName(const NamedDecl *ND) override;

public:
  CGNVCUDARuntime(CodeGenModule &CGM);

  void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
  void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
                         unsigned Flags) override {
    DeviceVars.push_back({&Var, VD, Flags});
  }

  /// Creates module constructor function
  llvm::Function *makeModuleCtorFunction() override;
  /// Creates module destructor function
  llvm::Function *makeModuleDtorFunction() override;
};

}

std::string CGNVCUDARuntime::addPrefixToName(StringRef FuncName) const {
  if (CGM.getLangOpts().HIP)
    return ((Twine("hip") + Twine(FuncName)).str());
  return ((Twine("cuda") + Twine(FuncName)).str());
}
std::string
CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
  if (CGM.getLangOpts().HIP)
    return ((Twine("__hip") + Twine(FuncName)).str());
  return ((Twine("__cuda") + Twine(FuncName)).str());
}

CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
    : CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
      TheModule(CGM.getModule()),
      RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
      DeviceMC(CGM.getContext().createMangleContext(
          CGM.getContext().getAuxTargetInfo())) {
  CodeGen::CodeGenTypes &Types = CGM.getTypes();
  ASTContext &Ctx = CGM.getContext();

  IntTy = CGM.IntTy;
  SizeTy = CGM.SizeTy;
  VoidTy = CGM.VoidTy;

  CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
  VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
  VoidPtrPtrTy = VoidPtrTy->getPointerTo();
}

llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
  // cudaError_t cudaSetupArgument(void *, size_t, size_t)
  llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
  return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, Params, false),
      addPrefixToName("SetupArgument"));
}

llvm::FunctionCallee CGNVCUDARuntime::getLaunchFn() const {
  if (CGM.getLangOpts().HIP) {
    // hipError_t hipLaunchByPtr(char *);
    return CGM.CreateRuntimeFunction(
        llvm::FunctionType::get(IntTy, CharPtrTy, false), "hipLaunchByPtr");
  } else {
    // cudaError_t cudaLaunch(char *);
    return CGM.CreateRuntimeFunction(
        llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
  }
}

llvm::FunctionType *CGNVCUDARuntime::getRegisterGlobalsFnTy() const {
  return llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false);
}

llvm::FunctionType *CGNVCUDARuntime::getCallbackFnTy() const {
  return llvm::FunctionType::get(VoidTy, VoidPtrTy, false);
}

llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
  auto CallbackFnTy = getCallbackFnTy();
  auto RegisterGlobalsFnTy = getRegisterGlobalsFnTy();
  llvm::Type *Params[] = {RegisterGlobalsFnTy->getPointerTo(), VoidPtrTy,
                          VoidPtrTy, CallbackFnTy->getPointerTo()};
  return llvm::FunctionType::get(VoidTy, Params, false);
}

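/// Returns the device-side name of \p ND. Kernels and device variables are
/// registered under the name they have in the device-side compilation, so the
/// mangling is done with DeviceMC (a mangle context built from the aux target
/// info) rather than the host mangle context.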
std::string CGNVCUDARuntime::getDeviceSideName(const NamedDecl *ND) {
  GlobalDecl GD;
  // D could be either a kernel or a variable.
  if (auto *FD = dyn_cast<FunctionDecl>(ND))
    GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
  else
    GD = GlobalDecl(ND);
  std::string DeviceSideName;
  if (DeviceMC->shouldMangleDeclName(ND)) {
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    DeviceMC->mangleName(GD, Out);
    DeviceSideName = std::string(Out.str());
  } else
    DeviceSideName = std::string(ND->getIdentifier()->getName());
  return DeviceSideName;
}

void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
                                     FunctionArgList &Args) {
  EmittedKernels.push_back({CGF.CurFn, CGF.CurFuncDecl});
  if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                         CudaFeature::CUDA_USES_NEW_LAUNCH) ||
      CGF.getLangOpts().HIPUseNewLaunchAPI)
    emitDeviceStubBodyNew(CGF, Args);
  else
    emitDeviceStubBodyLegacy(CGF, Args);
}

// CUDA 9.0+ uses a new way to launch kernels: parameters are packed into a
// local array and the kernel is launched using cudaLaunchKernel().
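//
// The body emitted below is roughly equivalent to the following pseudo-code
// (names are illustrative only; hip-prefixed runtime entry points are used
// when compiling HIP):
//
//   void __device_stub__kernel(T0 a0, T1 a1, ...) {
//     void *kernel_args[] = {&a0, &a1, ...};
//     dim3 grid_dim, block_dim;
//     size_t shmem_size;
//     cudaStream_t stream;
//     __cudaPopCallConfiguration(&grid_dim, &block_dim, &shmem_size, &stream);
//     cudaLaunchKernel((void *)__device_stub__kernel, grid_dim, block_dim,
//                      kernel_args, shmem_size, stream);
//   }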
236void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
237 FunctionArgList &Args) {
238 // Build the shadow stack entry at the very start of the function.
239
240 // Calculate amount of space we will need for all arguments. If we have no
241 // args, allocate a single pointer so we still have a valid pointer to the
242 // argument array that we can pass to runtime, even if it will be unused.
243 Address KernelArgs = CGF.CreateTempAlloca(
244 VoidPtrTy, CharUnits::fromQuantity(16), "kernel_args",
245 llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
246 // Store pointers to the arguments in a locally allocated launch_args.
247 for (unsigned i = 0; i < Args.size(); ++i) {
248 llvm::Value* VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
249 llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, VoidPtrTy);
250 CGF.Builder.CreateDefaultAlignedStore(
251 VoidVarPtr, CGF.Builder.CreateConstGEP1_32(KernelArgs.getPointer(), i));
252 }
253
254 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
255
Yaxun Liu12828892019-09-24 19:16:40 +0000256 // Lookup cudaLaunchKernel/hipLaunchKernel function.
Artem Belevichc62214d2019-01-31 21:34:03 +0000257 // cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
258 // void **args, size_t sharedMem,
259 // cudaStream_t stream);
Yaxun Liu12828892019-09-24 19:16:40 +0000260 // hipError_t hipLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
261 // void **args, size_t sharedMem,
262 // hipStream_t stream);
Artem Belevichc62214d2019-01-31 21:34:03 +0000263 TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
264 DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
Yaxun Liu12828892019-09-24 19:16:40 +0000265 auto LaunchKernelName = addPrefixToName("LaunchKernel");
Artem Belevichc62214d2019-01-31 21:34:03 +0000266 IdentifierInfo &cudaLaunchKernelII =
Yaxun Liu12828892019-09-24 19:16:40 +0000267 CGM.getContext().Idents.get(LaunchKernelName);
Artem Belevichc62214d2019-01-31 21:34:03 +0000268 FunctionDecl *cudaLaunchKernelFD = nullptr;
269 for (const auto &Result : DC->lookup(&cudaLaunchKernelII)) {
270 if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Result))
271 cudaLaunchKernelFD = FD;
272 }
273
274 if (cudaLaunchKernelFD == nullptr) {
275 CGM.Error(CGF.CurFuncDecl->getLocation(),
Yaxun Liu12828892019-09-24 19:16:40 +0000276 "Can't find declaration for " + LaunchKernelName);
Artem Belevichc62214d2019-01-31 21:34:03 +0000277 return;
278 }
279 // Create temporary dim3 grid_dim, block_dim.
280 ParmVarDecl *GridDimParam = cudaLaunchKernelFD->getParamDecl(1);
281 QualType Dim3Ty = GridDimParam->getType();
282 Address GridDim =
283 CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "grid_dim");
284 Address BlockDim =
285 CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "block_dim");
286 Address ShmemSize =
287 CGF.CreateTempAlloca(SizeTy, CGM.getSizeAlign(), "shmem_size");
288 Address Stream =
289 CGF.CreateTempAlloca(VoidPtrTy, CGM.getPointerAlign(), "stream");
James Y Knight9871db02019-02-05 16:42:33 +0000290 llvm::FunctionCallee cudaPopConfigFn = CGM.CreateRuntimeFunction(
Artem Belevichc62214d2019-01-31 21:34:03 +0000291 llvm::FunctionType::get(IntTy,
292 {/*gridDim=*/GridDim.getType(),
293 /*blockDim=*/BlockDim.getType(),
294 /*ShmemSize=*/ShmemSize.getType(),
295 /*Stream=*/Stream.getType()},
296 /*isVarArg=*/false),
Yaxun Liu12828892019-09-24 19:16:40 +0000297 addUnderscoredPrefixToName("PopCallConfiguration"));
Artem Belevichc62214d2019-01-31 21:34:03 +0000298
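  // Retrieve the launch configuration (grid/block dimensions, shared memory
  // size and stream) that was pushed at the kernel call site via
  // __{cuda|hip}PushCallConfiguration when the <<<...>>> launch was lowered.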
  CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn,
                              {GridDim.getPointer(), BlockDim.getPointer(),
                               ShmemSize.getPointer(), Stream.getPointer()});

  // Emit the call to cudaLaunch
  llvm::Value *Kernel = CGF.Builder.CreatePointerCast(CGF.CurFn, VoidPtrTy);
  CallArgList LaunchKernelArgs;
  LaunchKernelArgs.add(RValue::get(Kernel),
                       cudaLaunchKernelFD->getParamDecl(0)->getType());
  LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()),
                       cudaLaunchKernelFD->getParamDecl(3)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)),
                       cudaLaunchKernelFD->getParamDecl(4)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(Stream)),
                       cudaLaunchKernelFD->getParamDecl(5)->getType());

  QualType QT = cudaLaunchKernelFD->getType();
  QualType CQT = QT.getCanonicalType();
  llvm::Type *Ty = CGM.getTypes().ConvertType(CQT);
  llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(Ty);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeFunctionDeclaration(cudaLaunchKernelFD);
  llvm::FunctionCallee cudaLaunchKernelFn =
      CGM.CreateRuntimeFunction(FTy, LaunchKernelName);
  CGF.EmitCall(FI, CGCallee::forDirect(cudaLaunchKernelFn), ReturnValueSlot(),
               LaunchKernelArgs);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}

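// Legacy (pre-CUDA-9.0) launch: every argument is copied into the launch
// "shadow stack" with cudaSetupArgument at its naturally aligned offset, and
// the kernel is then launched by passing the stub's own address to cudaLaunch
// (hip-prefixed entry points are used for HIP). Roughly, in pseudo-code:
//
//   void __device_stub__kernel(T0 a0, T1 a1, ...) {
//     if (cudaSetupArgument(&a0, sizeof(a0), offset_a0) != 0) return;
//     if (cudaSetupArgument(&a1, sizeof(a1), offset_a1) != 0) return;
//     ...
//     cudaLaunch((char *)__device_stub__kernel);
//   }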
void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
                                               FunctionArgList &Args) {
  // Emit a call to cudaSetupArgument for each arg in Args.
  llvm::FunctionCallee cudaSetupArgFn = getSetupArgumentFn();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
  CharUnits Offset = CharUnits::Zero();
  for (const VarDecl *A : Args) {
    CharUnits TyWidth, TyAlign;
    std::tie(TyWidth, TyAlign) =
        CGM.getContext().getTypeInfoInChars(A->getType());
    Offset = Offset.alignTo(TyAlign);
    llvm::Value *Args[] = {
        CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
                                      VoidPtrTy),
        llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
        llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
    };
    llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
    llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
    llvm::Value *CBZero = CGF.Builder.CreateICmpEQ(CB, Zero);
    llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
    CGF.Builder.CreateCondBr(CBZero, NextBlock, EndBlock);
    CGF.EmitBlock(NextBlock);
    Offset += TyWidth;
  }

  // Emit the call to cudaLaunch
  llvm::FunctionCallee cudaLaunchFn = getLaunchFn();
  llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
  CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}

/// Creates a function that sets up state on the host side for CUDA objects
/// that have a presence on both the host and device sides. Specifically,
/// registers the host side of kernel functions and device global variables
/// with the CUDA runtime.
/// \code
/// void __cuda_register_globals(void** GpuBinaryHandle) {
///    __cudaRegisterFunction(GpuBinaryHandle,Kernel0,...);
///    ...
///    __cudaRegisterFunction(GpuBinaryHandle,KernelM,...);
///    __cudaRegisterVar(GpuBinaryHandle, GlobalVar0, ...);
///    ...
///    __cudaRegisterVar(GpuBinaryHandle, GlobalVarN, ...);
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
  // No need to register anything
  if (EmittedKernels.empty() && DeviceVars.empty())
    return nullptr;

  llvm::Function *RegisterKernelsFunc = llvm::Function::Create(
      getRegisterGlobalsFnTy(), llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_register_globals"), &TheModule);
  llvm::BasicBlock *EntryBB =
      llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
  CGBuilderTy Builder(CGM, Context);
  Builder.SetInsertPoint(EntryBB);

  // void __cudaRegisterFunction(void **, const char *, char *, const char *,
  //                             int, uint3*, uint3*, dim3*, dim3*, int*)
  llvm::Type *RegisterFuncParams[] = {
      VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy,
      VoidPtrTy,    VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
  llvm::FunctionCallee RegisterFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
      addUnderscoredPrefixToName("RegisterFunction"));

  // Extract GpuBinaryHandle passed as the first argument to
  // __cuda_register_globals() and generate a __cudaRegisterFunction() call for
  // each emitted kernel.
  llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin();
  for (auto &&I : EmittedKernels) {
    llvm::Constant *KernelName =
        makeConstantString(getDeviceSideName(cast<NamedDecl>(I.D)));
    llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
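    // Arguments are, in order: the fatbin handle, the host-side stub (the key
    // the runtime uses to look the kernel up at launch time), the device-side
    // name (used both as the device function name and the exported name), a
    // thread limit of -1 (presumably meaning "no limit"), and null
    // tid/bid/blockDim/gridDim/wSize pointers that this registration does not
    // use.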
    llvm::Value *Args[] = {
        &GpuBinaryHandlePtr,
        Builder.CreateBitCast(I.Kernel, VoidPtrTy),
        KernelName,
        KernelName,
        llvm::ConstantInt::get(IntTy, -1),
        NullPtr,
        NullPtr,
        NullPtr,
        NullPtr,
        llvm::ConstantPointerNull::get(IntTy->getPointerTo())};
    Builder.CreateCall(RegisterFunc, Args);
  }

  // void __cudaRegisterVar(void **, char *, char *, const char *,
  //                        int, int, int, int)
  llvm::Type *RegisterVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
                                     CharPtrTy,    IntTy,     IntTy,
                                     IntTy,        IntTy};
  llvm::FunctionCallee RegisterVar = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, RegisterVarParams, false),
      addUnderscoredPrefixToName("RegisterVar"));
  for (auto &&Info : DeviceVars) {
    llvm::GlobalVariable *Var = Info.Var;
    unsigned Flags = Info.Flag;
    llvm::Constant *VarName = makeConstantString(getDeviceSideName(Info.D));
    uint64_t VarSize =
        CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
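    // Arguments are, in order: the fatbin handle, the host-side shadow
    // variable, the device-side name (twice), an "extern" flag, the variable
    // size in bytes, a "constant" flag, and a trailing 0 that appears to map
    // to the runtime's "global" parameter.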
    llvm::Value *Args[] = {
        &GpuBinaryHandlePtr,
        Builder.CreateBitCast(Var, VoidPtrTy),
        VarName,
        VarName,
        llvm::ConstantInt::get(IntTy, (Flags & ExternDeviceVar) ? 1 : 0),
        llvm::ConstantInt::get(IntTy, VarSize),
        llvm::ConstantInt::get(IntTy, (Flags & ConstantDeviceVar) ? 1 : 0),
        llvm::ConstantInt::get(IntTy, 0)};
    Builder.CreateCall(RegisterVar, Args);
  }

  Builder.CreateRetVoid();
  return RegisterKernelsFunc;
}

/// Creates a global constructor function for the module:
///
/// For CUDA:
/// \code
/// void __cuda_module_ctor(void*) {
///     Handle = __cudaRegisterFatBinary(GpuBinaryBlob);
///     __cuda_register_globals(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
/// void __hip_module_ctor(void*) {
///     if (__hip_gpubin_handle == 0) {
///         __hip_gpubin_handle = __hipRegisterFatBinary(GpuBinaryBlob);
///         __hip_register_globals(__hip_gpubin_handle);
///     }
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
  bool IsHIP = CGM.getLangOpts().HIP;
  bool IsCUDA = CGM.getLangOpts().CUDA;
  // No need to generate ctors/dtors if there is no GPU binary.
  StringRef CudaGpuBinaryFileName = CGM.getCodeGenOpts().CudaGpuBinaryFileName;
  if (CudaGpuBinaryFileName.empty() && !IsHIP)
    return nullptr;
  if ((IsHIP || (IsCUDA && !RelocatableDeviceCode)) && EmittedKernels.empty() &&
      DeviceVars.empty())
    return nullptr;

  // void __{cuda|hip}_register_globals(void* handle);
  llvm::Function *RegisterGlobalsFunc = makeRegisterGlobalsFn();
  // We always need a function to pass in as callback. Create a dummy
  // implementation if we don't need to register anything.
  if (RelocatableDeviceCode && !RegisterGlobalsFunc)
    RegisterGlobalsFunc = makeDummyFunction(getRegisterGlobalsFnTy());

  // void ** __{cuda|hip}RegisterFatBinary(void *);
  llvm::FunctionCallee RegisterFatbinFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
      addUnderscoredPrefixToName("RegisterFatBinary"));
  // struct { int magic, int version, void * gpu_binary, void * dont_care };
  llvm::StructType *FatbinWrapperTy =
      llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy);

  // Register GPU binary with the CUDA runtime, store returned handle in a
  // global variable and save a reference in GpuBinaryHandle to be cleaned up
  // in destructor on exit. Then associate all known kernels with the GPU
  // binary handle so CUDA runtime can figure out what to call on the GPU side.
  std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary = nullptr;
  if (!CudaGpuBinaryFileName.empty()) {
    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CudaGpuBinaryOrErr =
        llvm::MemoryBuffer::getFileOrSTDIN(CudaGpuBinaryFileName);
    if (std::error_code EC = CudaGpuBinaryOrErr.getError()) {
      CGM.getDiags().Report(diag::err_cannot_open_file)
          << CudaGpuBinaryFileName << EC.message();
      return nullptr;
    }
    CudaGpuBinary = std::move(CudaGpuBinaryOrErr.get());
  }

  llvm::Function *ModuleCtorFunc = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
      llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_module_ctor"), &TheModule);
  llvm::BasicBlock *CtorEntryBB =
      llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
  CGBuilderTy CtorBuilder(CGM, Context);

  CtorBuilder.SetInsertPoint(CtorEntryBB);

  const char *FatbinConstantName;
  const char *FatbinSectionName;
  const char *ModuleIDSectionName;
  StringRef ModuleIDPrefix;
  llvm::Constant *FatBinStr;
  unsigned FatMagic;
  if (IsHIP) {
    FatbinConstantName = ".hip_fatbin";
    FatbinSectionName = ".hipFatBinSegment";

    ModuleIDSectionName = "__hip_module_id";
    ModuleIDPrefix = "__hip_";

    if (CudaGpuBinary) {
      // If fatbin is available from early finalization, create a string
      // literal containing the fat binary loaded from the given file.
      FatBinStr = makeConstantString(std::string(CudaGpuBinary->getBuffer()),
                                     "", FatbinConstantName, 8);
    } else {
      // If fatbin is not available, create an external symbol
      // __hip_fatbin in section .hip_fatbin. The external symbol is supposed
      // to contain the fat binary but will be populated somewhere else,
      // e.g. by lld through link script.
      FatBinStr = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty,
          /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
          "__hip_fatbin", nullptr,
          llvm::GlobalVariable::NotThreadLocal);
      cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
    }

    FatMagic = HIPFatMagic;
  } else {
    if (RelocatableDeviceCode)
      FatbinConstantName = CGM.getTriple().isMacOSX()
                               ? "__NV_CUDA,__nv_relfatbin"
                               : "__nv_relfatbin";
    else
      FatbinConstantName =
          CGM.getTriple().isMacOSX() ? "__NV_CUDA,__nv_fatbin" : ".nv_fatbin";
    // NVIDIA's cuobjdump looks for fatbins in this section.
    FatbinSectionName =
        CGM.getTriple().isMacOSX() ? "__NV_CUDA,__fatbin" : ".nvFatBinSegment";

    ModuleIDSectionName = CGM.getTriple().isMacOSX()
                              ? "__NV_CUDA,__nv_module_id"
                              : "__nv_module_id";
    ModuleIDPrefix = "__nv_";

    // For CUDA, create a string literal containing the fat binary loaded from
    // the given file.
    FatBinStr = makeConstantString(std::string(CudaGpuBinary->getBuffer()), "",
                                   FatbinConstantName, 8);
    FatMagic = CudaFatMagic;
  }

  // Create initialized wrapper structure that points to the loaded GPU binary
  ConstantInitBuilder Builder(CGM);
  auto Values = Builder.beginStruct(FatbinWrapperTy);
  // Fatbin wrapper magic.
  Values.addInt(IntTy, FatMagic);
  // Fatbin version.
  Values.addInt(IntTy, 1);
  // Data.
  Values.add(FatBinStr);
  // Unused in fatbin v1.
  Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
  llvm::GlobalVariable *FatbinWrapper = Values.finishAndCreateGlobal(
      addUnderscoredPrefixToName("_fatbin_wrapper"), CGM.getPointerAlign(),
      /*constant*/ true);
  FatbinWrapper->setSection(FatbinSectionName);

  // There is only one HIP fat binary per linked module, however there are
  // multiple constructor functions. Make sure the fat binary is registered
  // only once. The constructor functions are executed by the dynamic loader
  // before the program gains control. The dynamic loader cannot execute the
  // constructor functions concurrently since doing that would not guarantee
  // thread safety of the loaded program. Therefore we can assume sequential
  // execution of constructor functions here.
  if (IsHIP) {
    auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage :
                                   llvm::GlobalValue::LinkOnceAnyLinkage;
    llvm::BasicBlock *IfBlock =
        llvm::BasicBlock::Create(Context, "if", ModuleCtorFunc);
    llvm::BasicBlock *ExitBlock =
        llvm::BasicBlock::Create(Context, "exit", ModuleCtorFunc);
    // The name, size, and initialization pattern of this variable are part
    // of the HIP ABI.
    GpuBinaryHandle = new llvm::GlobalVariable(
        TheModule, VoidPtrPtrTy, /*isConstant=*/false,
        Linkage,
        /*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
        "__hip_gpubin_handle");
    GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
    // Prevent the weak symbol in different shared libraries from being merged.
    if (Linkage != llvm::GlobalValue::InternalLinkage)
      GpuBinaryHandle->setVisibility(llvm::GlobalValue::HiddenVisibility);
    Address GpuBinaryAddr(
        GpuBinaryHandle,
        CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
    {
      auto HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
      llvm::Constant *Zero =
          llvm::Constant::getNullValue(HandleValue->getType());
      llvm::Value *EQZero = CtorBuilder.CreateICmpEQ(HandleValue, Zero);
      CtorBuilder.CreateCondBr(EQZero, IfBlock, ExitBlock);
    }
    {
      CtorBuilder.SetInsertPoint(IfBlock);
      // GpuBinaryHandle = __hipRegisterFatBinary(&FatbinWrapper);
      llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
          RegisterFatbinFunc,
          CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
      CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryAddr);
      CtorBuilder.CreateBr(ExitBlock);
    }
    {
      CtorBuilder.SetInsertPoint(ExitBlock);
      // Call __hip_register_globals(GpuBinaryHandle);
      if (RegisterGlobalsFunc) {
        auto HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
        CtorBuilder.CreateCall(RegisterGlobalsFunc, HandleValue);
      }
    }
  } else if (!RelocatableDeviceCode) {
    // Register binary with CUDA runtime. This is substantially different in
    // default mode vs. separate compilation!
    // GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
    llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
        RegisterFatbinFunc,
        CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
    GpuBinaryHandle = new llvm::GlobalVariable(
        TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
    GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
    CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
                                   CGM.getPointerAlign());

    // Call __cuda_register_globals(GpuBinaryHandle);
    if (RegisterGlobalsFunc)
      CtorBuilder.CreateCall(RegisterGlobalsFunc, RegisterFatbinCall);

    // Call __cudaRegisterFatBinaryEnd(Handle) if this CUDA version needs it.
    if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                           CudaFeature::CUDA_USES_FATBIN_REGISTER_END)) {
      // void __cudaRegisterFatBinaryEnd(void **);
      llvm::FunctionCallee RegisterFatbinEndFunc = CGM.CreateRuntimeFunction(
          llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
          "__cudaRegisterFatBinaryEnd");
      CtorBuilder.CreateCall(RegisterFatbinEndFunc, RegisterFatbinCall);
    }
  } else {
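    // Relocatable device code (-fgpu-rdc): registration is deferred. Instead
    // of calling __cudaRegisterFatBinary here, pass the register_globals
    // function as a callback to __cudaRegisterLinkedBinary<ModuleID>, which is
    // expected to be provided at device-link time and to invoke the callback
    // once the linked fat binary has been registered.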
    // Generate a unique module ID.
    SmallString<64> ModuleID;
    llvm::raw_svector_ostream OS(ModuleID);
    OS << ModuleIDPrefix << llvm::format("%" PRIx64, FatbinWrapper->getGUID());
    llvm::Constant *ModuleIDConstant = makeConstantString(
        std::string(ModuleID.str()), "", ModuleIDSectionName, 32);

    // Create an alias for the FatbinWrapper that nvcc will look for.
    llvm::GlobalAlias::create(llvm::GlobalValue::ExternalLinkage,
                              Twine("__fatbinwrap") + ModuleID, FatbinWrapper);

    // void __cudaRegisterLinkedBinary%ModuleID%(void (*)(void *), void *,
    //                                           void *, void (*)(void **))
    SmallString<128> RegisterLinkedBinaryName("__cudaRegisterLinkedBinary");
    RegisterLinkedBinaryName += ModuleID;
    llvm::FunctionCallee RegisterLinkedBinaryFunc = CGM.CreateRuntimeFunction(
        getRegisterLinkedBinaryFnTy(), RegisterLinkedBinaryName);

    assert(RegisterGlobalsFunc && "Expecting at least dummy function!");
    llvm::Value *Args[] = {RegisterGlobalsFunc,
                           CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy),
                           ModuleIDConstant,
                           makeDummyFunction(getCallbackFnTy())};
    CtorBuilder.CreateCall(RegisterLinkedBinaryFunc, Args);
  }

  // Create destructor and register it with atexit() the way NVCC does it. Doing
  // it during regular destructor phase worked in CUDA before 9.2 but results in
  // double-free in 9.2.
  if (llvm::Function *CleanupFn = makeModuleDtorFunction()) {
    // extern "C" int atexit(void (*f)(void));
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(IntTy, CleanupFn->getType(), false);
    llvm::FunctionCallee AtExitFunc =
        CGM.CreateRuntimeFunction(AtExitTy, "atexit", llvm::AttributeList(),
                                  /*Local=*/true);
    CtorBuilder.CreateCall(AtExitFunc, CleanupFn);
  }

  CtorBuilder.CreateRetVoid();
  return ModuleCtorFunc;
}

/// Creates a global destructor function that unregisters the GPU code blob
/// registered by constructor.
///
/// For CUDA:
/// \code
/// void __cuda_module_dtor(void*) {
///     __cudaUnregisterFatBinary(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
/// void __hip_module_dtor(void*) {
///     if (__hip_gpubin_handle) {
///         __hipUnregisterFatBinary(__hip_gpubin_handle);
///         __hip_gpubin_handle = 0;
///     }
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
  // No need for destructor if we don't have a handle to unregister.
  if (!GpuBinaryHandle)
    return nullptr;

  // void __cudaUnregisterFatBinary(void ** handle);
  llvm::FunctionCallee UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
      addUnderscoredPrefixToName("UnregisterFatBinary"));

  llvm::Function *ModuleDtorFunc = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
      llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_module_dtor"), &TheModule);

  llvm::BasicBlock *DtorEntryBB =
      llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
  CGBuilderTy DtorBuilder(CGM, Context);
  DtorBuilder.SetInsertPoint(DtorEntryBB);

  Address GpuBinaryAddr(GpuBinaryHandle, CharUnits::fromQuantity(
                                             GpuBinaryHandle->getAlignment()));
  auto HandleValue = DtorBuilder.CreateLoad(GpuBinaryAddr);
  // There is only one HIP fat binary per linked module, however there are
  // multiple destructor functions. Make sure the fat binary is unregistered
  // only once.
  if (CGM.getLangOpts().HIP) {
    llvm::BasicBlock *IfBlock =
        llvm::BasicBlock::Create(Context, "if", ModuleDtorFunc);
    llvm::BasicBlock *ExitBlock =
        llvm::BasicBlock::Create(Context, "exit", ModuleDtorFunc);
    llvm::Constant *Zero = llvm::Constant::getNullValue(HandleValue->getType());
    llvm::Value *NEZero = DtorBuilder.CreateICmpNE(HandleValue, Zero);
    DtorBuilder.CreateCondBr(NEZero, IfBlock, ExitBlock);

    DtorBuilder.SetInsertPoint(IfBlock);
    DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
    DtorBuilder.CreateStore(Zero, GpuBinaryAddr);
    DtorBuilder.CreateBr(ExitBlock);

    DtorBuilder.SetInsertPoint(ExitBlock);
  } else {
    DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
  }
  DtorBuilder.CreateRetVoid();
  return ModuleDtorFunc;
}

CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
  return new CGNVCUDARuntime(CGM);
}