//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for CUDA code generation targeting the NVIDIA CUDA
// runtime library.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/Cuda.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Format.h"

using namespace clang;
using namespace CodeGen;

namespace {
constexpr unsigned CudaFatMagic = 0x466243b1;
constexpr unsigned HIPFatMagic = 0x48495046; // "HIPF"

class CGNVCUDARuntime : public CGCUDARuntime {

private:
  llvm::IntegerType *IntTy, *SizeTy;
  llvm::Type *VoidTy;
  llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy;

  /// Convenience reference to LLVM Context
  llvm::LLVMContext &Context;
  /// Convenience reference to the current module
  llvm::Module &TheModule;
  /// Keeps track of kernel launch stubs emitted in this module
  struct KernelInfo {
    llvm::Function *Kernel;
    const Decl *D;
  };
  llvm::SmallVector<KernelInfo, 16> EmittedKernels;
  struct VarInfo {
    llvm::GlobalVariable *Var;
    const VarDecl *D;
    DeviceVarFlags Flags;
  };
  llvm::SmallVector<VarInfo, 16> DeviceVars;
  /// Keeps track of the variable containing the handle of the GPU binary.
  /// Populated by ModuleCtorFunction() and used to create corresponding
  /// cleanup calls in ModuleDtorFunction().
  llvm::GlobalVariable *GpuBinaryHandle = nullptr;
  /// Whether we generate relocatable device code.
  bool RelocatableDeviceCode;
  /// Mangle context for device.
  std::unique_ptr<MangleContext> DeviceMC;

  llvm::FunctionCallee getSetupArgumentFn() const;
  llvm::FunctionCallee getLaunchFn() const;

  llvm::FunctionType *getRegisterGlobalsFnTy() const;
  llvm::FunctionType *getCallbackFnTy() const;
  llvm::FunctionType *getRegisterLinkedBinaryFnTy() const;
  std::string addPrefixToName(StringRef FuncName) const;
  std::string addUnderscoredPrefixToName(StringRef FuncName) const;

  /// Creates a function to register all kernel stubs generated in this module.
  llvm::Function *makeRegisterGlobalsFn();

  /// Helper function that generates a constant string and returns a pointer to
  /// the start of the string. The result of this function can be used anywhere
  /// where the C code specifies const char*.
  llvm::Constant *makeConstantString(const std::string &Str,
                                     const std::string &Name = "",
                                     const std::string &SectionName = "",
                                     unsigned Alignment = 0) {
    llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
                               llvm::ConstantInt::get(SizeTy, 0)};
    auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
    llvm::GlobalVariable *GV =
        cast<llvm::GlobalVariable>(ConstStr.getPointer());
    if (!SectionName.empty()) {
      GV->setSection(SectionName);
      // Mark the address as used, which makes sure that this section isn't
      // merged and we will really have it in the object file.
      GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::None);
    }
    if (Alignment)
      GV->setAlignment(llvm::Align(Alignment));

    return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
                                                ConstStr.getPointer(), Zeros);
  }

  /// Helper function that generates an empty dummy function returning void.
  llvm::Function *makeDummyFunction(llvm::FunctionType *FnTy) {
    assert(FnTy->getReturnType()->isVoidTy() &&
           "Can only generate dummy functions returning void!");
    llvm::Function *DummyFunc = llvm::Function::Create(
        FnTy, llvm::GlobalValue::InternalLinkage, "dummy", &TheModule);

    llvm::BasicBlock *DummyBlock =
        llvm::BasicBlock::Create(Context, "", DummyFunc);
    CGBuilderTy FuncBuilder(CGM, Context);
    FuncBuilder.SetInsertPoint(DummyBlock);
    FuncBuilder.CreateRetVoid();

    return DummyFunc;
  }

  void emitDeviceStubBodyLegacy(CodeGenFunction &CGF, FunctionArgList &Args);
  void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
  std::string getDeviceSideName(const NamedDecl *ND) override;

public:
  CGNVCUDARuntime(CodeGenModule &CGM);

  void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
  void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
                         bool Extern, bool Constant) override {
    DeviceVars.push_back({&Var,
                          VD,
                          {DeviceVarFlags::Variable, Extern, Constant,
                           /*Normalized*/ false, /*Type*/ 0}});
  }
  void registerDeviceSurf(const VarDecl *VD, llvm::GlobalVariable &Var,
                          bool Extern, int Type) override {
    DeviceVars.push_back({&Var,
                          VD,
                          {DeviceVarFlags::Surface, Extern, /*Constant*/ false,
                           /*Normalized*/ false, Type}});
  }
  void registerDeviceTex(const VarDecl *VD, llvm::GlobalVariable &Var,
                         bool Extern, int Type, bool Normalized) override {
    DeviceVars.push_back({&Var,
                          VD,
                          {DeviceVarFlags::Texture, Extern, /*Constant*/ false,
                           Normalized, Type}});
  }

  /// Creates module constructor function
  llvm::Function *makeModuleCtorFunction() override;
  /// Creates module destructor function
  llvm::Function *makeModuleDtorFunction() override;
};

}

std::string CGNVCUDARuntime::addPrefixToName(StringRef FuncName) const {
  if (CGM.getLangOpts().HIP)
    return ((Twine("hip") + Twine(FuncName)).str());
  return ((Twine("cuda") + Twine(FuncName)).str());
}
std::string
CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
  if (CGM.getLangOpts().HIP)
    return ((Twine("__hip") + Twine(FuncName)).str());
  return ((Twine("__cuda") + Twine(FuncName)).str());
}

CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
    : CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
      TheModule(CGM.getModule()),
      RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
      DeviceMC(CGM.getContext().createMangleContext(
          CGM.getContext().getAuxTargetInfo())) {
  CodeGen::CodeGenTypes &Types = CGM.getTypes();
  ASTContext &Ctx = CGM.getContext();

  IntTy = CGM.IntTy;
  SizeTy = CGM.SizeTy;
  VoidTy = CGM.VoidTy;

  CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
  VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
  VoidPtrPtrTy = VoidPtrTy->getPointerTo();
}

llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
  // cudaError_t cudaSetupArgument(void *, size_t, size_t)
  llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
  return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, Params, false),
      addPrefixToName("SetupArgument"));
}

llvm::FunctionCallee CGNVCUDARuntime::getLaunchFn() const {
  if (CGM.getLangOpts().HIP) {
    // hipError_t hipLaunchByPtr(char *);
    return CGM.CreateRuntimeFunction(
        llvm::FunctionType::get(IntTy, CharPtrTy, false), "hipLaunchByPtr");
  } else {
    // cudaError_t cudaLaunch(char *);
    return CGM.CreateRuntimeFunction(
        llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
  }
}

llvm::FunctionType *CGNVCUDARuntime::getRegisterGlobalsFnTy() const {
  return llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false);
}

llvm::FunctionType *CGNVCUDARuntime::getCallbackFnTy() const {
  return llvm::FunctionType::get(VoidTy, VoidPtrTy, false);
}

llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
  auto CallbackFnTy = getCallbackFnTy();
  auto RegisterGlobalsFnTy = getRegisterGlobalsFnTy();
  llvm::Type *Params[] = {RegisterGlobalsFnTy->getPointerTo(), VoidPtrTy,
                          VoidPtrTy, CallbackFnTy->getPointerTo()};
  return llvm::FunctionType::get(VoidTy, Params, false);
}

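// For instance (an illustrative example, assuming the device side uses the
// Itanium C++ ABI): a kernel declared as `__global__ void axpy(float a,
// float *x)` gets the device-side name "_Z4axpyfPf", while an `extern "C"`
// kernel or an unmangled device variable keeps its plain identifier.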
std::string CGNVCUDARuntime::getDeviceSideName(const NamedDecl *ND) {
  GlobalDecl GD;
  // ND could be either a kernel or a variable.
  if (auto *FD = dyn_cast<FunctionDecl>(ND))
    GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
  else
    GD = GlobalDecl(ND);
  std::string DeviceSideName;
  if (DeviceMC->shouldMangleDeclName(ND)) {
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    DeviceMC->mangleName(GD, Out);
    DeviceSideName = std::string(Out.str());
  } else
    DeviceSideName = std::string(ND->getIdentifier()->getName());
  return DeviceSideName;
}

void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
                                     FunctionArgList &Args) {
  EmittedKernels.push_back({CGF.CurFn, CGF.CurFuncDecl});
  if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                         CudaFeature::CUDA_USES_NEW_LAUNCH) ||
      CGF.getLangOpts().HIPUseNewLaunchAPI)
    emitDeviceStubBodyNew(CGF, Args);
  else
    emitDeviceStubBodyLegacy(CGF, Args);
}

// CUDA 9.0+ uses a new way to launch kernels. Parameters are packed in a local
// array and kernels are launched using cudaLaunchKernel().
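//
// As an illustration (a conceptual sketch of what this function emits, not
// literal source; `k_stub` is a placeholder name, and under HIP the entry
// points are the `__hip*`/`hip*` equivalents), a kernel
// `__global__ void k(int X, float *Y)` gets a host stub roughly like:
// \code
// void k_stub(int X, float *Y) {
//   void *Args[2] = {&X, &Y};
//   dim3 GridDim, BlockDim; size_t ShmemSize; void *Stream;
//   __cudaPopCallConfiguration(&GridDim, &BlockDim, &ShmemSize, &Stream);
//   cudaLaunchKernel((void *)k_stub, GridDim, BlockDim, Args, ShmemSize,
//                    (cudaStream_t)Stream);
// }
// \endcode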
void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
                                            FunctionArgList &Args) {
  // Build the shadow stack entry at the very start of the function.

  // Calculate amount of space we will need for all arguments. If we have no
  // args, allocate a single pointer so we still have a valid pointer to the
  // argument array that we can pass to runtime, even if it will be unused.
  Address KernelArgs = CGF.CreateTempAlloca(
      VoidPtrTy, CharUnits::fromQuantity(16), "kernel_args",
      llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
  // Store pointers to the arguments in the locally allocated kernel_args.
  for (unsigned i = 0; i < Args.size(); ++i) {
    llvm::Value *VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
    llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, VoidPtrTy);
    CGF.Builder.CreateDefaultAlignedStore(
        VoidVarPtr, CGF.Builder.CreateConstGEP1_32(KernelArgs.getPointer(), i));
  }

  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");

  // Lookup cudaLaunchKernel/hipLaunchKernel function.
  // cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
  //                              void **args, size_t sharedMem,
  //                              cudaStream_t stream);
  // hipError_t hipLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
  //                            void **args, size_t sharedMem,
  //                            hipStream_t stream);
  TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
  auto LaunchKernelName = addPrefixToName("LaunchKernel");
  IdentifierInfo &cudaLaunchKernelII =
      CGM.getContext().Idents.get(LaunchKernelName);
  FunctionDecl *cudaLaunchKernelFD = nullptr;
  for (const auto &Result : DC->lookup(&cudaLaunchKernelII)) {
    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Result))
      cudaLaunchKernelFD = FD;
  }

  if (cudaLaunchKernelFD == nullptr) {
    CGM.Error(CGF.CurFuncDecl->getLocation(),
              "Can't find declaration for " + LaunchKernelName);
    return;
  }
  // Create temporary dim3 grid_dim, block_dim.
  ParmVarDecl *GridDimParam = cudaLaunchKernelFD->getParamDecl(1);
  QualType Dim3Ty = GridDimParam->getType();
  Address GridDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "grid_dim");
  Address BlockDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "block_dim");
  Address ShmemSize =
      CGF.CreateTempAlloca(SizeTy, CGM.getSizeAlign(), "shmem_size");
  Address Stream =
      CGF.CreateTempAlloca(VoidPtrTy, CGM.getPointerAlign(), "stream");
  llvm::FunctionCallee cudaPopConfigFn = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy,
                              {/*gridDim=*/GridDim.getType(),
                               /*blockDim=*/BlockDim.getType(),
                               /*ShmemSize=*/ShmemSize.getType(),
                               /*Stream=*/Stream.getType()},
                              /*isVarArg=*/false),
      addUnderscoredPrefixToName("PopCallConfiguration"));

  CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn,
                              {GridDim.getPointer(), BlockDim.getPointer(),
                               ShmemSize.getPointer(), Stream.getPointer()});

  // Emit the call to cudaLaunchKernel/hipLaunchKernel.
  llvm::Value *Kernel = CGF.Builder.CreatePointerCast(CGF.CurFn, VoidPtrTy);
  CallArgList LaunchKernelArgs;
  LaunchKernelArgs.add(RValue::get(Kernel),
                       cudaLaunchKernelFD->getParamDecl(0)->getType());
  LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()),
                       cudaLaunchKernelFD->getParamDecl(3)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)),
                       cudaLaunchKernelFD->getParamDecl(4)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(Stream)),
                       cudaLaunchKernelFD->getParamDecl(5)->getType());

  QualType QT = cudaLaunchKernelFD->getType();
  QualType CQT = QT.getCanonicalType();
  llvm::Type *Ty = CGM.getTypes().ConvertType(CQT);
  llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(Ty);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeFunctionDeclaration(cudaLaunchKernelFD);
  llvm::FunctionCallee cudaLaunchKernelFn =
      CGM.CreateRuntimeFunction(FTy, LaunchKernelName);
  CGF.EmitCall(FI, CGCallee::forDirect(cudaLaunchKernelFn), ReturnValueSlot(),
               LaunchKernelArgs);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}

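// Before CUDA 9.0 the stub instead pushes each argument with cudaSetupArgument
// and then launches by stub address. Conceptually (an illustrative sketch, not
// literal generated source; `k_stub` is a placeholder and `AlignedOffset` is
// the running, alignment-adjusted byte offset computed in the loop below):
// \code
// void k_stub(int X, float *Y) {
//   if (cudaSetupArgument(&X, sizeof(X), /*offset*/ 0) == 0)
//     if (cudaSetupArgument(&Y, sizeof(Y), AlignedOffset) == 0)
//       cudaLaunch((char *)k_stub);
// }
// \endcode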
void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
                                               FunctionArgList &Args) {
  // Emit a call to cudaSetupArgument for each arg in Args.
  llvm::FunctionCallee cudaSetupArgFn = getSetupArgumentFn();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
  CharUnits Offset = CharUnits::Zero();
  for (const VarDecl *A : Args) {
    CharUnits TyWidth, TyAlign;
    std::tie(TyWidth, TyAlign) =
        CGM.getContext().getTypeInfoInChars(A->getType());
    Offset = Offset.alignTo(TyAlign);
    llvm::Value *Args[] = {
        CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
                                      VoidPtrTy),
        llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
        llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
    };
    llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
    llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
    llvm::Value *CBZero = CGF.Builder.CreateICmpEQ(CB, Zero);
    llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
    CGF.Builder.CreateCondBr(CBZero, NextBlock, EndBlock);
    CGF.EmitBlock(NextBlock);
    Offset += TyWidth;
  }

  // Emit the call to cudaLaunch
  llvm::FunctionCallee cudaLaunchFn = getLaunchFn();
  llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
  CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}

/// Creates a function that sets up state on the host side for CUDA objects that
/// have a presence on both the host and device sides. Specifically, registers
/// the host side of kernel functions and device global variables with the CUDA
/// runtime.
/// \code
/// void __cuda_register_globals(void** GpuBinaryHandle) {
///   __cudaRegisterFunction(GpuBinaryHandle,Kernel0,...);
///   ...
///   __cudaRegisterFunction(GpuBinaryHandle,KernelM,...);
///   __cudaRegisterVar(GpuBinaryHandle, GlobalVar0, ...);
///   ...
///   __cudaRegisterVar(GpuBinaryHandle, GlobalVarN, ...);
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
  // No need to register anything
  if (EmittedKernels.empty() && DeviceVars.empty())
    return nullptr;

  llvm::Function *RegisterKernelsFunc = llvm::Function::Create(
      getRegisterGlobalsFnTy(), llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_register_globals"), &TheModule);
  llvm::BasicBlock *EntryBB =
      llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
  CGBuilderTy Builder(CGM, Context);
  Builder.SetInsertPoint(EntryBB);

  // void __cudaRegisterFunction(void **, const char *, char *, const char *,
  //                             int, uint3*, uint3*, dim3*, dim3*, int*)
  llvm::Type *RegisterFuncParams[] = {
      VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy,
      VoidPtrTy,    VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
  llvm::FunctionCallee RegisterFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
      addUnderscoredPrefixToName("RegisterFunction"));

421 // Extract GpuBinaryHandle passed as the first argument passed to
Artem Belevich42e19492016-03-02 18:28:50 +0000422 // __cuda_register_globals() and generate __cudaRegisterFunction() call for
Artem Belevich52cc4872015-05-07 19:34:16 +0000423 // each emitted kernel.
  llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin();
  for (auto &&I : EmittedKernels) {
    llvm::Constant *KernelName =
        makeConstantString(getDeviceSideName(cast<NamedDecl>(I.D)));
    llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
    llvm::Value *Args[] = {
        &GpuBinaryHandlePtr,
        Builder.CreateBitCast(I.Kernel, VoidPtrTy),
        KernelName,
        KernelName,
        llvm::ConstantInt::get(IntTy, -1),
        NullPtr,
        NullPtr,
        NullPtr,
        NullPtr,
        llvm::ConstantPointerNull::get(IntTy->getPointerTo())};
    Builder.CreateCall(RegisterFunc, Args);
  }

  llvm::Type *VarSizeTy = IntTy;
  // For HIP or CUDA 9.0+, the device variable size is of type `size_t`.
  if (CGM.getLangOpts().HIP ||
      ToCudaVersion(CGM.getTarget().getSDKVersion()) >= CudaVersion::CUDA_90)
    VarSizeTy = SizeTy;

  // void __cudaRegisterVar(void **, char *, char *, const char *,
  //                        int, int, int, int)
  llvm::Type *RegisterVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
                                     CharPtrTy,    IntTy,     VarSizeTy,
                                     IntTy,        IntTy};
  llvm::FunctionCallee RegisterVar = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidTy, RegisterVarParams, false),
      addUnderscoredPrefixToName("RegisterVar"));
  // void __cudaRegisterSurface(void **, const struct surfaceReference *,
  //                            const void **, const char *, int, int);
  llvm::FunctionCallee RegisterSurf = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(
          VoidTy, {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy},
          false),
      addUnderscoredPrefixToName("RegisterSurface"));
  // void __cudaRegisterTexture(void **, const struct textureReference *,
  //                            const void **, const char *, int, int, int)
  llvm::FunctionCallee RegisterTex = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(
          VoidTy,
          {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy, IntTy},
          false),
      addUnderscoredPrefixToName("RegisterTexture"));
  for (auto &&Info : DeviceVars) {
    llvm::GlobalVariable *Var = Info.Var;
    llvm::Constant *VarName = makeConstantString(getDeviceSideName(Info.D));
    switch (Info.Flags.getKind()) {
    case DeviceVarFlags::Variable: {
      uint64_t VarSize =
          CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
      llvm::Value *Args[] = {
          &GpuBinaryHandlePtr,
          Builder.CreateBitCast(Var, VoidPtrTy),
          VarName,
          VarName,
          llvm::ConstantInt::get(IntTy, Info.Flags.isExtern()),
          llvm::ConstantInt::get(VarSizeTy, VarSize),
          llvm::ConstantInt::get(IntTy, Info.Flags.isConstant()),
          llvm::ConstantInt::get(IntTy, 0)};
      Builder.CreateCall(RegisterVar, Args);
      break;
    }
    case DeviceVarFlags::Surface:
      Builder.CreateCall(
          RegisterSurf,
          {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
           VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
           llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
      break;
    case DeviceVarFlags::Texture:
      Builder.CreateCall(
          RegisterTex,
          {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
           VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
           llvm::ConstantInt::get(IntTy, Info.Flags.isNormalized()),
           llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
      break;
    }
  }

  Builder.CreateRetVoid();
  return RegisterKernelsFunc;
}

/// Creates a global constructor function for the module:
///
/// For CUDA:
/// \code
/// void __cuda_module_ctor(void*) {
///   Handle = __cudaRegisterFatBinary(GpuBinaryBlob);
///   __cuda_register_globals(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
/// void __hip_module_ctor(void*) {
///   if (__hip_gpubin_handle == 0) {
///     __hip_gpubin_handle = __hipRegisterFatBinary(GpuBinaryBlob);
///     __hip_register_globals(__hip_gpubin_handle);
///   }
/// }
/// \endcode
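///
/// For CUDA with relocatable device code (-fgpu-rdc), registration is instead
/// handed to the runtime through a callback; the constructor is roughly (a
/// sketch with placeholder names; the real register function and alias are
/// suffixed with a per-module ID):
/// \code
/// void __cuda_module_ctor(void*) {
///   __cudaRegisterLinkedBinary_<ModuleID>(__cuda_register_globals,
///                                         __cuda_fatbin_wrapper,
///                                         __cuda_module_id_str,
///                                         dummy_callback);
/// }
/// \endcode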
llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
  bool IsHIP = CGM.getLangOpts().HIP;
  bool IsCUDA = CGM.getLangOpts().CUDA;
  // No need to generate ctors/dtors if there is no GPU binary.
  StringRef CudaGpuBinaryFileName = CGM.getCodeGenOpts().CudaGpuBinaryFileName;
  if (CudaGpuBinaryFileName.empty() && !IsHIP)
    return nullptr;
  if ((IsHIP || (IsCUDA && !RelocatableDeviceCode)) && EmittedKernels.empty() &&
      DeviceVars.empty())
    return nullptr;

  // void __{cuda|hip}_register_globals(void* handle);
  llvm::Function *RegisterGlobalsFunc = makeRegisterGlobalsFn();
  // We always need a function to pass in as callback. Create a dummy
  // implementation if we don't need to register anything.
  if (RelocatableDeviceCode && !RegisterGlobalsFunc)
    RegisterGlobalsFunc = makeDummyFunction(getRegisterGlobalsFnTy());

  // void ** __{cuda|hip}RegisterFatBinary(void *);
  llvm::FunctionCallee RegisterFatbinFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
      addUnderscoredPrefixToName("RegisterFatBinary"));
  // struct { int magic, int version, void * gpu_binary, void * dont_care };
  llvm::StructType *FatbinWrapperTy =
      llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy);

  // Register GPU binary with the CUDA runtime, store returned handle in a
  // global variable and save a reference in GpuBinaryHandle to be cleaned up
  // in destructor on exit. Then associate all known kernels with the GPU binary
  // handle so CUDA runtime can figure out what to call on the GPU side.
  std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary = nullptr;
  if (!CudaGpuBinaryFileName.empty()) {
    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CudaGpuBinaryOrErr =
        llvm::MemoryBuffer::getFileOrSTDIN(CudaGpuBinaryFileName);
    if (std::error_code EC = CudaGpuBinaryOrErr.getError()) {
      CGM.getDiags().Report(diag::err_cannot_open_file)
          << CudaGpuBinaryFileName << EC.message();
      return nullptr;
    }
    CudaGpuBinary = std::move(CudaGpuBinaryOrErr.get());
  }

  llvm::Function *ModuleCtorFunc = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
      llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_module_ctor"), &TheModule);
  llvm::BasicBlock *CtorEntryBB =
      llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
  CGBuilderTy CtorBuilder(CGM, Context);

  CtorBuilder.SetInsertPoint(CtorEntryBB);

  const char *FatbinConstantName;
  const char *FatbinSectionName;
  const char *ModuleIDSectionName;
  StringRef ModuleIDPrefix;
  llvm::Constant *FatBinStr;
  unsigned FatMagic;
  if (IsHIP) {
    FatbinConstantName = ".hip_fatbin";
    FatbinSectionName = ".hipFatBinSegment";

    ModuleIDSectionName = "__hip_module_id";
    ModuleIDPrefix = "__hip_";

    if (CudaGpuBinary) {
      // If fatbin is available from early finalization, create a string
      // literal containing the fat binary loaded from the given file.
      FatBinStr = makeConstantString(std::string(CudaGpuBinary->getBuffer()),
                                     "", FatbinConstantName, 8);
    } else {
      // If fatbin is not available, create an external symbol
      // __hip_fatbin in section .hip_fatbin. The external symbol is supposed
      // to contain the fat binary but will be populated somewhere else,
      // e.g. by lld through link script.
      FatBinStr = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty,
          /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
          "__hip_fatbin", nullptr,
          llvm::GlobalVariable::NotThreadLocal);
      cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
    }

    FatMagic = HIPFatMagic;
  } else {
    if (RelocatableDeviceCode)
      FatbinConstantName = CGM.getTriple().isMacOSX()
                               ? "__NV_CUDA,__nv_relfatbin"
                               : "__nv_relfatbin";
    else
      FatbinConstantName =
          CGM.getTriple().isMacOSX() ? "__NV_CUDA,__nv_fatbin" : ".nv_fatbin";
    // NVIDIA's cuobjdump looks for fatbins in this section.
    FatbinSectionName =
        CGM.getTriple().isMacOSX() ? "__NV_CUDA,__fatbin" : ".nvFatBinSegment";

    ModuleIDSectionName = CGM.getTriple().isMacOSX()
                              ? "__NV_CUDA,__nv_module_id"
                              : "__nv_module_id";
    ModuleIDPrefix = "__nv_";

    // For CUDA, create a string literal containing the fat binary loaded from
    // the given file.
    FatBinStr = makeConstantString(std::string(CudaGpuBinary->getBuffer()), "",
                                   FatbinConstantName, 8);
    FatMagic = CudaFatMagic;
  }

  // Create initialized wrapper structure that points to the loaded GPU binary
  ConstantInitBuilder Builder(CGM);
  auto Values = Builder.beginStruct(FatbinWrapperTy);
  // Fatbin wrapper magic.
  Values.addInt(IntTy, FatMagic);
  // Fatbin version.
  Values.addInt(IntTy, 1);
  // Data.
  Values.add(FatBinStr);
  // Unused in fatbin v1.
  Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
  llvm::GlobalVariable *FatbinWrapper = Values.finishAndCreateGlobal(
      addUnderscoredPrefixToName("_fatbin_wrapper"), CGM.getPointerAlign(),
      /*constant*/ true);
  FatbinWrapper->setSection(FatbinSectionName);

  // There is only one HIP fat binary per linked module; however, there are
  // multiple constructor functions. Make sure the fat binary is registered
  // only once. The constructor functions are executed by the dynamic loader
  // before the program gains control. The dynamic loader cannot execute the
  // constructor functions concurrently since doing that would not guarantee
  // thread safety of the loaded program. Therefore we can assume sequential
  // execution of constructor functions here.
  if (IsHIP) {
    auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage
                                 : llvm::GlobalValue::LinkOnceAnyLinkage;
    llvm::BasicBlock *IfBlock =
        llvm::BasicBlock::Create(Context, "if", ModuleCtorFunc);
    llvm::BasicBlock *ExitBlock =
        llvm::BasicBlock::Create(Context, "exit", ModuleCtorFunc);
    // The name, size, and initialization pattern of this variable are part
    // of the HIP ABI.
    GpuBinaryHandle = new llvm::GlobalVariable(
        TheModule, VoidPtrPtrTy, /*isConstant=*/false,
        Linkage,
        /*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
        "__hip_gpubin_handle");
    GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
    // Prevent the weak symbol in different shared libraries from being merged.
    if (Linkage != llvm::GlobalValue::InternalLinkage)
      GpuBinaryHandle->setVisibility(llvm::GlobalValue::HiddenVisibility);
    Address GpuBinaryAddr(
        GpuBinaryHandle,
        CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
    {
      auto HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
      llvm::Constant *Zero =
          llvm::Constant::getNullValue(HandleValue->getType());
      llvm::Value *EQZero = CtorBuilder.CreateICmpEQ(HandleValue, Zero);
      CtorBuilder.CreateCondBr(EQZero, IfBlock, ExitBlock);
    }
    {
      CtorBuilder.SetInsertPoint(IfBlock);
      // GpuBinaryHandle = __hipRegisterFatBinary(&FatbinWrapper);
      llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
          RegisterFatbinFunc,
          CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
      CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryAddr);
      CtorBuilder.CreateBr(ExitBlock);
    }
    {
      CtorBuilder.SetInsertPoint(ExitBlock);
      // Call __hip_register_globals(GpuBinaryHandle);
      if (RegisterGlobalsFunc) {
        auto HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
        CtorBuilder.CreateCall(RegisterGlobalsFunc, HandleValue);
      }
    }
  } else if (!RelocatableDeviceCode) {
    // Register binary with CUDA runtime. This is substantially different in
    // default mode vs. separate compilation!
    // GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
    llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
        RegisterFatbinFunc,
        CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
    GpuBinaryHandle = new llvm::GlobalVariable(
        TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
    GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
    CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
                                   CGM.getPointerAlign());

    // Call __cuda_register_globals(GpuBinaryHandle);
    if (RegisterGlobalsFunc)
      CtorBuilder.CreateCall(RegisterGlobalsFunc, RegisterFatbinCall);

    // Call __cudaRegisterFatBinaryEnd(Handle) if this CUDA version needs it.
    if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                           CudaFeature::CUDA_USES_FATBIN_REGISTER_END)) {
      // void __cudaRegisterFatBinaryEnd(void **);
      llvm::FunctionCallee RegisterFatbinEndFunc = CGM.CreateRuntimeFunction(
          llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
          "__cudaRegisterFatBinaryEnd");
      CtorBuilder.CreateCall(RegisterFatbinEndFunc, RegisterFatbinCall);
    }
  } else {
    // Generate a unique module ID.
    SmallString<64> ModuleID;
    llvm::raw_svector_ostream OS(ModuleID);
    OS << ModuleIDPrefix << llvm::format("%" PRIx64, FatbinWrapper->getGUID());
    llvm::Constant *ModuleIDConstant = makeConstantString(
        std::string(ModuleID.str()), "", ModuleIDSectionName, 32);

    // Create an alias for the FatbinWrapper that nvcc will look for.
    llvm::GlobalAlias::create(llvm::GlobalValue::ExternalLinkage,
                              Twine("__fatbinwrap") + ModuleID, FatbinWrapper);

    // void __cudaRegisterLinkedBinary%ModuleID%(void (*)(void *), void *,
    //                                           void *, void (*)(void **))
    SmallString<128> RegisterLinkedBinaryName("__cudaRegisterLinkedBinary");
    RegisterLinkedBinaryName += ModuleID;
    llvm::FunctionCallee RegisterLinkedBinaryFunc = CGM.CreateRuntimeFunction(
        getRegisterLinkedBinaryFnTy(), RegisterLinkedBinaryName);

    assert(RegisterGlobalsFunc && "Expecting at least dummy function!");
    llvm::Value *Args[] = {RegisterGlobalsFunc,
                           CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy),
                           ModuleIDConstant,
                           makeDummyFunction(getCallbackFnTy())};
    CtorBuilder.CreateCall(RegisterLinkedBinaryFunc, Args);
  }

  // Create a destructor and register it with atexit() the way NVCC does it.
  // Doing it during the regular destructor phase worked in CUDA before 9.2 but
  // results in a double-free in 9.2.
  if (llvm::Function *CleanupFn = makeModuleDtorFunction()) {
    // extern "C" int atexit(void (*f)(void));
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(IntTy, CleanupFn->getType(), false);
    llvm::FunctionCallee AtExitFunc =
        CGM.CreateRuntimeFunction(AtExitTy, "atexit", llvm::AttributeList(),
                                  /*Local=*/true);
    CtorBuilder.CreateCall(AtExitFunc, CleanupFn);
  }

  CtorBuilder.CreateRetVoid();
  return ModuleCtorFunc;
}

/// Creates a global destructor function that unregisters the GPU code blob
/// registered by constructor.
///
/// For CUDA:
/// \code
/// void __cuda_module_dtor(void*) {
///   __cudaUnregisterFatBinary(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
/// void __hip_module_dtor(void*) {
///   if (__hip_gpubin_handle) {
///     __hipUnregisterFatBinary(__hip_gpubin_handle);
///     __hip_gpubin_handle = 0;
///   }
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
  // No need for destructor if we don't have a handle to unregister.
  if (!GpuBinaryHandle)
    return nullptr;

  // void __cudaUnregisterFatBinary(void ** handle);
  llvm::FunctionCallee UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
      addUnderscoredPrefixToName("UnregisterFatBinary"));

  llvm::Function *ModuleDtorFunc = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
      llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_module_dtor"), &TheModule);

  llvm::BasicBlock *DtorEntryBB =
      llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
  CGBuilderTy DtorBuilder(CGM, Context);
  DtorBuilder.SetInsertPoint(DtorEntryBB);

  Address GpuBinaryAddr(GpuBinaryHandle, CharUnits::fromQuantity(
                                             GpuBinaryHandle->getAlignment()));
  auto HandleValue = DtorBuilder.CreateLoad(GpuBinaryAddr);
  // There is only one HIP fat binary per linked module; however, there are
  // multiple destructor functions. Make sure the fat binary is unregistered
  // only once.
  if (CGM.getLangOpts().HIP) {
    llvm::BasicBlock *IfBlock =
        llvm::BasicBlock::Create(Context, "if", ModuleDtorFunc);
    llvm::BasicBlock *ExitBlock =
        llvm::BasicBlock::Create(Context, "exit", ModuleDtorFunc);
    llvm::Constant *Zero = llvm::Constant::getNullValue(HandleValue->getType());
    llvm::Value *NEZero = DtorBuilder.CreateICmpNE(HandleValue, Zero);
    DtorBuilder.CreateCondBr(NEZero, IfBlock, ExitBlock);

    DtorBuilder.SetInsertPoint(IfBlock);
    DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
    DtorBuilder.CreateStore(Zero, GpuBinaryAddr);
    DtorBuilder.CreateBr(ExitBlock);

    DtorBuilder.SetInsertPoint(ExitBlock);
  } else {
    DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
  }
  DtorBuilder.CreateRetVoid();
  return ModuleDtorFunc;
}

CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
  return new CGNVCUDARuntime(CGM);
}