//===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation specialized to NVPTX
// targets.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeNVPTX.h"
#include "clang/AST/DeclOpenMP.h"
#include "CodeGenFunction.h"
#include "clang/AST/StmtOpenMP.h"

using namespace clang;
using namespace CodeGen;

namespace {
enum OpenMPRTLFunctionNVPTX {
  /// \brief Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_kernel_init,
  /// \brief Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_deinit,
  /// \brief Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
  OMPRTL_NVPTX__kmpc_spmd_kernel_init,
  /// \brief Call to void __kmpc_spmd_kernel_deinit();
  OMPRTL_NVPTX__kmpc_spmd_kernel_deinit,
  /// \brief Call to void __kmpc_kernel_prepare_parallel(void
  /// *outlined_function, void ***args, kmp_int32 nArgs);
  OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
  /// \brief Call to bool __kmpc_kernel_parallel(void **outlined_function, void
  /// ***args);
  OMPRTL_NVPTX__kmpc_kernel_parallel,
  /// \brief Call to void __kmpc_kernel_end_parallel();
  OMPRTL_NVPTX__kmpc_kernel_end_parallel,
  /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_serialized_parallel,
  /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_end_serialized_parallel,
  /// \brief Call to int32_t __kmpc_shuffle_int32(int32_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int32,
  /// \brief Call to int64_t __kmpc_shuffle_int64(int64_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int64,
  /// \brief Call to __kmpc_nvptx_parallel_reduce_nowait(kmp_int32
  /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
  OMPRTL_NVPTX__kmpc_parallel_reduce_nowait,
  /// \brief Call to __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
  /// int32_t num_vars, size_t reduce_size, void *reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhs, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num),
  /// void (*kmp_CopyToScratchpadFctPtr)(void *reduce_data, void * scratchpad,
  /// int32_t index, int32_t width),
  /// void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad, int32_t
  /// index, int32_t width, int32_t reduce))
  OMPRTL_NVPTX__kmpc_teams_reduce_nowait,
  /// \brief Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
  OMPRTL_NVPTX__kmpc_end_reduce_nowait
};
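// NB: these enumerators name entry points expected to be provided by the
// NVPTX OpenMP device runtime (libomptarget-nvptx); the signatures in the
// comments above mirror that runtime's interface.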

/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::Value *EnterCallee;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::Value *ExitCallee;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
                llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
                bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};

// A class to track the execution mode when codegening directives within
// a target region. The appropriate mode (generic/spmd) is set on entry
// to the target region and used by directives nested within it, such as
// 'parallel', to emit optimized code.
class ExecutionModeRAII {
private:
  CGOpenMPRuntimeNVPTX::ExecutionMode SavedMode;
  CGOpenMPRuntimeNVPTX::ExecutionMode &Mode;

public:
  ExecutionModeRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &Mode,
                    CGOpenMPRuntimeNVPTX::ExecutionMode NewMode)
      : Mode(Mode) {
    SavedMode = Mode;
    Mode = NewMode;
  }
  ~ExecutionModeRAII() { Mode = SavedMode; }
};

/// GPU Configuration: This information can be derived from CUDA registers;
/// however, providing compile-time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  WarpSize = 32,
  /// Number of bits required to represent a lane identifier, which is
  /// computed as log_2(WarpSize).
  LaneIDBits = 5,
  LaneIDMask = WarpSize - 1,

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 256,
};

enum NamedBarrier : unsigned {
  /// Synchronize on this barrier #ID using a named barrier primitive.
  /// Only the subset of active threads in a parallel region arrives at the
  /// barrier.
  NB_Parallel = 1,
};
} // anonymous namespace

/// Get the GPU warp size.
static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
      "nvptx_warp_size");
}

/// Get the id of the current thread on the GPU.
static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
      "nvptx_tid");
}

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateAShr(getNVPTXThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateAnd(getNVPTXThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}
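
// Worked example (illustrative): with WarpSize == 32 and LaneIDBits == 5,
// thread id 37 has warp id 37 >> 5 == 1 and lane id 37 & 31 == 5.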

/// Get the maximum number of threads in a block of the GPU.
static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
      "nvptx_num_threads");
}

/// Get barrier to synchronize all threads in a block.
static void getNVPTXCTABarrier(CodeGenFunction &CGF) {
  CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
      &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier0));
}

/// Get barrier #ID to synchronize selected (multiple of warp size) threads in
/// a CTA.
static void getNVPTXBarrier(CodeGenFunction &CGF, int ID,
                            llvm::Value *NumThreads) {
  CGBuilderTy &Bld = CGF.Builder;
  llvm::Value *Args[] = {Bld.getInt32(ID), NumThreads};
  CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
                          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier),
                      Args);
}

/// Synchronize all GPU threads in a block.
static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); }

/// Synchronize worker threads in a parallel region.
static void syncParallelThreads(CodeGenFunction &CGF, llvm::Value *NumThreads) {
  return getNVPTXBarrier(CGF, NB_Parallel, NumThreads);
}
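
// Note (assumed NVVM semantics): nvvm_barrier0 corresponds to a CTA-wide
// 'bar.sync 0', while nvvm_barrier takes a barrier id plus a thread count
// that must be a multiple of the warp size, which is why syncParallelThreads
// only synchronizes warp-multiple subsets of threads.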

/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
/// CTA. The threads in the last warp are reserved for master execution.
/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
                                   bool IsInSpmdExecutionMode = false) {
  CGBuilderTy &Bld = CGF.Builder;
  return IsInSpmdExecutionMode
             ? getNVPTXNumThreads(CGF)
             : Bld.CreateSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
                             "thread_limit");
}
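
// Illustration: in generic execution mode with a CTA of 128 threads,
// getThreadLimit returns 128 - 32 = 96, so threads 0..95 serve as workers and
// the last warp (threads 96..127) is reserved for master execution.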

/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block. Warp size is assumed to be some power of 2.
/// Thread id is 0 indexed.
/// E.g: If NumThreads is 33, master id is 32.
///      If NumThreads is 64, master id is 32.
///      If NumThreads is 1024, master id is 992.
static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  llvm::Value *NumThreads = getNVPTXNumThreads(CGF);

  // We assume that the warp size is a power of 2.
  llvm::Value *Mask = Bld.CreateSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));

  return Bld.CreateAnd(Bld.CreateSub(NumThreads, Bld.getInt32(1)),
                       Bld.CreateNot(Mask), "master_tid");
}
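
// Equivalently, in closed form: master_tid = (NumThreads - 1) & ~(WarpSize - 1),
// i.e. NumThreads - 1 rounded down to the nearest multiple of the warp size.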

CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
    CodeGenModule &CGM)
    : WorkerFn(nullptr), CGFI(nullptr) {
  createWorkerFunction(CGM);
}

void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
    CodeGenModule &CGM) {
  // Create a worker function with no arguments.
  CGFI = &CGM.getTypes().arrangeNullaryFunction();

  WorkerFn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(*CGFI), llvm::GlobalValue::InternalLinkage,
      /* placeholder */ "_worker", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, WorkerFn, *CGFI);
}

bool CGOpenMPRuntimeNVPTX::isInSpmdExecutionMode() const {
  return CurrentExecutionMode == CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd;
}

static CGOpenMPRuntimeNVPTX::ExecutionMode
getExecutionModeForDirective(CodeGenModule &CGM,
                             const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return CGOpenMPRuntimeNVPTX::ExecutionMode::Generic;
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
    return CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd;
  default:
    llvm_unreachable("Unsupported directive on NVPTX device.");
  }
  llvm_unreachable("Unsupported directive on NVPTX device.");
}
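
// For example, '#pragma omp target teams' is compiled as a generic
// (master/worker) kernel, while '#pragma omp target parallel for' is compiled
// as an SPMD kernel in which every thread executes the parallel region.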

void CGOpenMPRuntimeNVPTX::emitGenericKernel(const OMPExecutableDirective &D,
                                             StringRef ParentName,
                                             llvm::Function *&OutlinedFn,
                                             llvm::Constant *&OutlinedFnID,
                                             bool IsOffloadEntry,
                                             const RegionCodeGenTy &CodeGen) {
  ExecutionModeRAII ModeRAII(CurrentExecutionMode,
                             CGOpenMPRuntimeNVPTX::ExecutionMode::Generic);
  EntryFunctionState EST;
  WorkerFunctionState WST(CGM);
  Work.clear();
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX &RT;
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
                         CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
        : RT(RT), EST(EST), WST(WST) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitGenericEntryHeader(CGF, EST, WST);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.emitGenericEntryFooter(CGF, EST);
    }
  } Action(*this, EST, WST);
  CodeGen.setAction(Action);
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);

  // Create the worker function.
  emitWorkerFunction(WST);

  // Now change the name of the worker function to correspond to this target
  // region's entry function.
  WST.WorkerFn->setName(OutlinedFn->getName() + "_worker");
}

// Setup NVPTX threads for master-worker OpenMP scheme.
void CGOpenMPRuntimeNVPTX::emitGenericEntryHeader(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST,
                                                  WorkerFunctionState &WST) {
  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
  llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
  llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  auto *IsWorker =
      Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
  Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);

  CGF.EmitBlock(WorkerBB);
  emitCall(CGF, WST.WorkerFn);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(MasterCheckBB);
  auto *IsMaster =
      Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
  Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);

  CGF.EmitBlock(MasterBB);
  // First action in sequential region:
  // Initialize the state of the OpenMP runtime library on the GPU.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {getThreadLimit(CGF),
                         Bld.getInt16(/*RequiresOMPRuntime=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);
}

void CGOpenMPRuntimeNVPTX::emitGenericEntryFooter(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST) {
  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
  CGF.EmitBranch(TerminateBB);

  CGF.EmitBlock(TerminateBB);
  // Signal termination condition.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args);
  // Barrier to terminate worker threads.
  syncCTAThreads(CGF);
  // Master thread jumps to exit point.
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}

void CGOpenMPRuntimeNVPTX::emitSpmdKernel(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen) {
  ExecutionModeRAII ModeRAII(CurrentExecutionMode,
                             CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd);
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX &RT;
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    const OMPExecutableDirective &D;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
                         CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         const OMPExecutableDirective &D)
        : RT(RT), EST(EST), D(D) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitSpmdEntryHeader(CGF, EST, D);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.emitSpmdEntryFooter(CGF, EST);
    }
  } Action(*this, EST, D);
  CodeGen.setAction(Action);
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
}

void CGOpenMPRuntimeNVPTX::emitSpmdEntryHeader(
    CodeGenFunction &CGF, EntryFunctionState &EST,
    const OMPExecutableDirective &D) {
  auto &Bld = CGF.Builder;

  // Setup BBs in entry function.
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  // Initialize the OMP state in the runtime; called by all active threads.
  // TODO: Set RequiresOMPRuntime and RequiresDataSharing parameters
  // based on code analysis of the target region.
  llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSpmdExecutionMode=*/true),
                         /*RequiresOMPRuntime=*/Bld.getInt16(1),
                         /*RequiresDataSharing=*/Bld.getInt16(1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);
  CGF.EmitBranch(ExecuteBB);

  CGF.EmitBlock(ExecuteBB);
}

void CGOpenMPRuntimeNVPTX::emitSpmdEntryFooter(CodeGenFunction &CGF,
                                               EntryFunctionState &EST) {
  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
  CGF.EmitBranch(OMPDeInitBB);

  CGF.EmitBlock(OMPDeInitBB);
  // Deinitialize the OMP state in the runtime; called by all active threads.
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_deinit), None);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}

// Create a unique global variable to indicate the execution mode of this target
// region. The execution mode is either 'generic' or 'spmd', depending on the
// target directive. This variable is picked up by the offload library to setup
// the device appropriately before kernel launch. If the execution mode is
// 'generic', the runtime reserves one warp for the master; otherwise, all
// warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     CGOpenMPRuntimeNVPTX::ExecutionMode Mode) {
  (void)new llvm::GlobalVariable(
      CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
      llvm::GlobalValue::WeakAnyLinkage,
      llvm::ConstantInt::get(CGM.Int8Ty, Mode), Name + Twine("_exec_mode"));
}
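
// Sketch of the resulting IR (names illustrative): for a kernel outlined as
// @__omp_offloading_<...>_foo_l12 this emits
//   @__omp_offloading_<...>_foo_l12_exec_mode = weak constant i8 <Mode>
// where <Mode> is the numeric value of the ExecutionMode enumerator.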

void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
  ASTContext &Ctx = CGM.getContext();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.disableDebugInfo();
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, *WST.CGFI, {});
  emitWorkerLoop(CGF, WST);
  CGF.FinishFunction();
}

void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
                                          WorkerFunctionState &WST) {
  //
  // The workers enter this loop and wait for parallel work from the master.
  // When the master encounters a parallel region it sets up the work + variable
  // arguments, and wakes up the workers. The workers first check to see if
  // they are required for the parallel region, i.e., within the # of requested
  // parallel threads. The activated workers load the variable arguments and
  // execute the parallel work.
  //
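  // Handshake sketch (the master side is emitted in emitGenericParallelCall):
  //   1. The master publishes the work function and the shared-argument list
  //      via __kmpc_kernel_prepare_parallel and releases a CTA-wide barrier.
  //   2. Workers read the work via __kmpc_kernel_parallel; the selected ones
  //      execute the matching wrapper and call __kmpc_kernel_end_parallel.
  //   3. A second CTA-wide barrier lets the master resume past the region.
  //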

  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
  llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
  llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");

  CGF.EmitBranch(AwaitBB);

  // Workers wait for work from master.
  CGF.EmitBlock(AwaitBB);
  // Wait for parallel work.
  syncCTAThreads(CGF);

  Address WorkFn =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
  Address ExecStatus =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
  CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
  CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));

  // Set up shared arguments.
  Address SharedArgs =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrPtrTy, "shared_args");
  llvm::Value *Args[] = {WorkFn.getPointer(), SharedArgs.getPointer()};
  llvm::Value *Ret = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
  Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);

  // On termination condition (WorkFn == 0), exit loop.
  llvm::Value *ShouldTerminate =
      Bld.CreateIsNull(Bld.CreateLoad(WorkFn), "should_terminate");
  Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);

  // Activate requested workers.
  CGF.EmitBlock(SelectWorkersBB);
  llvm::Value *IsActive =
      Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
  Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);

  // Signal start of parallel region.
  CGF.EmitBlock(ExecuteBB);

  // Current context.
  ASTContext &Ctx = CGF.getContext();

  // Process work items: outlined parallel functions.
  for (auto *W : Work) {
    // Try to match this outlined function.
    auto *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);

    llvm::Value *WorkFnMatch =
        Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");

    llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
    llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
    Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);

    // Execute this outlined function.
    CGF.EmitBlock(ExecuteFNBB);

    // Insert call to work function via shared wrapper. The shared
    // wrapper takes exactly three arguments:
    //   - the parallelism level;
    //   - the master thread ID;
    //   - the list of references to shared arguments.
    //
    // TODO: Assert that the function is a wrapper function.
    Address Capture = CGF.EmitLoadOfPointer(SharedArgs,
       Ctx.getPointerType(
          Ctx.getPointerType(Ctx.VoidPtrTy)).castAs<PointerType>());
    emitCall(CGF, W, {Bld.getInt16(/*ParallelLevel=*/0),
        getMasterThreadID(CGF), Capture.getPointer()});

    // Go to end of parallel region.
    CGF.EmitBranch(TerminateBB);

    CGF.EmitBlock(CheckNextBB);
  }

  // Signal end of parallel region.
  CGF.EmitBlock(TerminateBB);
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
      llvm::None);
  CGF.EmitBranch(BarrierBB);

  // All active and inactive workers wait at a barrier after parallel region.
  CGF.EmitBlock(BarrierBB);
  // Barrier after parallel region.
  syncCTAThreads(CGF);
  CGF.EmitBranch(AwaitBB);

  // Exit target region.
  CGF.EmitBlock(ExitBB);
}

/// \brief Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::Constant *
CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
  llvm::Constant *RTLFn = nullptr;
  switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
  case OMPRTL_NVPTX__kmpc_kernel_init: {
    // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
    // RequiresOMPRuntime);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_deinit: {
    // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
    llvm::Type *TypeParams[] = {CGM.Int16Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
    // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
    // int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit: {
    // Build void __kmpc_spmd_kernel_deinit();
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
    // Build void __kmpc_kernel_prepare_parallel(
    // void *outlined_function, void ***args, kmp_int32 nArgs);
    llvm::Type *TypeParams[] = {CGM.Int8PtrTy,
        CGM.Int8PtrPtrTy->getPointerTo(0), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_parallel: {
    // Build bool __kmpc_kernel_parallel(void **outlined_function,
    // void ***args);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy,
        CGM.Int8PtrPtrTy->getPointerTo(0)};
    llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
    // Build void __kmpc_kernel_end_parallel();
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_serialized_parallel: {
    // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
    // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int32: {
    // Build int32_t __kmpc_shuffle_int32(int32_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int64: {
    // Build int64_t __kmpc_shuffle_int64(int64_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
    break;
  }
  case OMPRTL_NVPTX__kmpc_parallel_reduce_nowait: {
    // Build int32_t __kmpc_nvptx_parallel_reduce_nowait(kmp_int32 global_tid,
    // kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
    // lane_offset, int16_t shortCircuit),
    // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.SizeTy,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_teams_reduce_nowait: {
    // Build int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
    // int32_t num_vars, size_t reduce_size, void *reduce_data,
    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
    // lane_offset, int16_t shortCircuit),
    // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num),
    // void (*kmp_CopyToScratchpadFctPtr)(void *reduce_data, void * scratchpad,
    // int32_t index, int32_t width),
    // void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad,
    // int32_t index, int32_t width, int32_t reduce))
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *CopyToScratchpadTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy,
                                                CGM.Int32Ty, CGM.Int32Ty};
    auto *CopyToScratchpadFnTy =
        llvm::FunctionType::get(CGM.VoidTy, CopyToScratchpadTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *LoadReduceTypeParams[] = {
        CGM.VoidPtrTy, CGM.VoidPtrTy, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty};
    auto *LoadReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, LoadReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.SizeTy,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo(),
                                CopyToScratchpadFnTy->getPointerTo(),
                                LoadReduceFnTy->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
    // Build void __kmpc_nvptx_end_reduce_nowait(kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
    break;
  }
  }
  return RTLFn;
}

void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
                                              llvm::Constant *Addr,
                                              uint64_t Size, int32_t) {
  auto *F = dyn_cast<llvm::Function>(Addr);
  // TODO: Add support for global variables on the device after declare target
  // support.
  if (!F)
    return;
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get "nvvm.annotations" metadata node
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
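
// The annotation emitted above marks the offload entry as a CUDA kernel; the
// module-level metadata looks roughly like (illustrative, pointer syntax
// elided):
//   !nvvm.annotations = !{!0}
//   !0 = !{@<entry_fn>, !"kernel", i32 1}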

void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  CGOpenMPRuntimeNVPTX::ExecutionMode Mode =
      getExecutionModeForDirective(CGM, D);
  switch (Mode) {
  case CGOpenMPRuntimeNVPTX::ExecutionMode::Generic:
    emitGenericKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);
    break;
  case CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd:
    emitSpmdKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
    break;
  case CGOpenMPRuntimeNVPTX::ExecutionMode::Unknown:
    llvm_unreachable(
        "Unknown programming model for OpenMP directive on NVPTX target.");
  }

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}

CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM), CurrentExecutionMode(ExecutionMode::Unknown) {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP NVPTX can only handle device code.");
}

void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
                                              OpenMPProcBindClauseKind ProcBind,
                                              SourceLocation Loc) {
  // Do nothing in case of Spmd mode and L0 parallel.
  // TODO: If in Spmd mode and L1 parallel emit the clause.
  if (isInSpmdExecutionMode())
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}

void CGOpenMPRuntimeNVPTX::emitNumThreadsClause(CodeGenFunction &CGF,
                                                llvm::Value *NumThreads,
                                                SourceLocation Loc) {
  // Do nothing in case of Spmd mode and L0 parallel.
  // TODO: If in Spmd mode and L1 parallel emit the clause.
  if (isInSpmdExecutionMode())
    return;

  CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
}

void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
                                              const Expr *NumTeams,
                                              const Expr *ThreadLimit,
                                              SourceLocation Loc) {}

llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  auto *OutlinedFun = cast<llvm::Function>(
      CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  if (!isInSpmdExecutionMode()) {
    llvm::Function *WrapperFun =
        createDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}

llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  llvm::Value *OutlinedFunVal = CGOpenMPRuntime::emitTeamsOutlinedFunction(
      D, ThreadIDVar, InnermostKind, CodeGen);
  llvm::Function *OutlinedFun = cast<llvm::Function>(OutlinedFunVal);
  OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
  OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
  OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);

  return OutlinedFun;
}

void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &D,
                                         SourceLocation Loc,
                                         llvm::Value *OutlinedFn,
                                         ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;

  Address ZeroAddr =
      CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
                           /*Name*/ ".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}

void CGOpenMPRuntimeNVPTX::emitParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  if (!CGF.HaveInsertPoint())
    return;

  if (isInSpmdExecutionMode())
    emitSpmdParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
  else
    emitGenericParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
}

void CGOpenMPRuntimeNVPTX::emitGenericParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
  llvm::Function *WFn = WrapperFunctionsMap[Fn];
  assert(WFn && "Wrapper function does not exist!");

  // Force inline this outlined function at its call site.
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);

  auto &&L0ParallelGen = [this, WFn, &CapturedVars](CodeGenFunction &CGF,
                                                    PrePostActionTy &) {
    CGBuilderTy &Bld = CGF.Builder;

    llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);

    if (!CapturedVars.empty()) {
      // There's something to share; add the attribute.
      CGF.CurFn->addFnAttr("has-nvptx-shared-depot");
      // Prepare for parallel region. Indicate the outlined function.
      Address SharedArgs =
          CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy,
              "shared_args");
      llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
      llvm::Value *Args[] = {ID, SharedArgsPtr,
                             Bld.getInt32(CapturedVars.size())};

      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
          Args);

      unsigned Idx = 0;
      ASTContext &Ctx = CGF.getContext();
      for (llvm::Value *V : CapturedVars) {
        Address Dst = Bld.CreateConstInBoundsGEP(
            CGF.EmitLoadOfPointer(SharedArgs,
                Ctx.getPointerType(
                    Ctx.getPointerType(Ctx.VoidPtrTy)).castAs<PointerType>()),
            Idx, CGF.getPointerSize());
        llvm::Value *PtrV = Bld.CreateBitCast(V, CGF.VoidPtrTy);
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
                              Ctx.getPointerType(Ctx.VoidPtrTy));
        Idx++;
      }
    } else {
      llvm::Value *Args[] = {ID,
          llvm::ConstantPointerNull::get(CGF.VoidPtrPtrTy->getPointerTo(0)),
          /*nArgs=*/Bld.getInt32(0)};
      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
          Args);
    }

    // Activate workers. This barrier is used by the master to signal
    // work for the workers.
    syncCTAThreads(CGF);

    // OpenMP [2.5, Parallel Construct, p.49]
    // There is an implied barrier at the end of a parallel region. After the
    // end of a parallel region, only the master thread of the team resumes
    // execution of the enclosing task region.
    //
    // The master waits at this barrier until all workers are done.
    syncCTAThreads(CGF);

    // Remember for post-processing in worker loop.
    Work.emplace_back(WFn);
  };

  auto *RTLoc = emitUpdateLocation(CGF, Loc);
  auto *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *Args[] = {RTLoc, ThreadID};

  auto &&SeqGen = [this, Fn, &CapturedVars, &Args, Loc](CodeGenFunction &CGF,
                                                        PrePostActionTy &) {
    auto &&CodeGen = [this, Fn, &CapturedVars, Loc](CodeGenFunction &CGF,
                                                    PrePostActionTy &Action) {
      Action.Enter(CGF);

      llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
      OutlinedFnArgs.push_back(
          llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
      OutlinedFnArgs.push_back(
          llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
      OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
      emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
    };

    RegionCodeGenTy RCG(CodeGen);
    NVPTXActionTy Action(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
        Args,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
        Args);
    RCG.setAction(Action);
    RCG(CGF);
  };

  if (IfCond)
    emitOMPIfClause(CGF, IfCond, L0ParallelGen, SeqGen);
  else {
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    RegionCodeGenTy ThenRCG(L0ParallelGen);
    ThenRCG(CGF);
  }
}

void CGOpenMPRuntimeNVPTX::emitSpmdParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  // Just call the outlined function to execute the parallel region.
  // OutlinedFn(&GTid, &zero, CapturedStruct);
  //
  // TODO: Do something with IfCond when support for the 'if' clause
  // is added on Spmd target directives.
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
  OutlinedFnArgs.push_back(
      llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
  OutlinedFnArgs.push_back(
      llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}

/// This function creates calls to one of two shuffle functions to copy
/// variables between lanes in a warp.
static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
                                                 QualType ElemTy,
                                                 llvm::Value *Elem,
                                                 llvm::Value *Offset) {
  auto &CGM = CGF.CGM;
  auto &C = CGM.getContext();
  auto &Bld = CGF.Builder;
  CGOpenMPRuntimeNVPTX &RT =
      *(static_cast<CGOpenMPRuntimeNVPTX *>(&CGM.getOpenMPRuntime()));

  unsigned Size = CGM.getContext().getTypeSizeInChars(ElemTy).getQuantity();
  assert(Size <= 8 && "Unsupported bitwidth in shuffle instruction.");

  OpenMPRTLFunctionNVPTX ShuffleFn = Size <= 4
                                         ? OMPRTL_NVPTX__kmpc_shuffle_int32
                                         : OMPRTL_NVPTX__kmpc_shuffle_int64;

  // Cast all types to 32- or 64-bit values before calling shuffle routines.
  auto CastTy = Size <= 4 ? CGM.Int32Ty : CGM.Int64Ty;
  auto *ElemCast = Bld.CreateSExtOrBitCast(Elem, CastTy);
  auto *WarpSize = CGF.EmitScalarConversion(
      getNVPTXWarpSize(CGF), C.getIntTypeForBitwidth(32, /* Signed */ true),
      C.getIntTypeForBitwidth(16, /* Signed */ true), SourceLocation());

  auto *ShuffledVal =
      CGF.EmitRuntimeCall(RT.createNVPTXRuntimeFunction(ShuffleFn),
                          {ElemCast, Offset, WarpSize});

  return Bld.CreateTruncOrBitCast(ShuffledVal, CGF.ConvertTypeForMem(ElemTy));
}
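
// Semantics sketch (hedged; the definitions live in the device runtime):
// __kmpc_shuffle_int{32,64} behave like a warp shuffle-down, so with
// Offset == 4 each lane L receives the element held by lane L + 4. Applying
// decreasing offsets WarpSize/2, WarpSize/4, ..., 1 yields the butterfly
// pattern used by the reduction helpers.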

namespace {
enum CopyAction : unsigned {
  // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
  // the warp using shuffle instructions.
  RemoteLaneToThread,
  // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
  ThreadCopy,
  // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
  ThreadToScratchpad,
  // ScratchpadToThread: Copy from a scratchpad array in global memory
  // containing team-reduced data to a thread's stack.
  ScratchpadToThread,
};
} // namespace

struct CopyOptionsTy {
  llvm::Value *RemoteLaneOffset;
  llvm::Value *ScratchpadIndex;
  llvm::Value *ScratchpadWidth;
};
1105
Arpith Chacko Jacob101e8fb2017-02-16 16:20:16 +00001106/// Emit instructions to copy a Reduce list, which contains partially
1107/// aggregated values, in the specified direction.
Arpith Chacko Jacobfc711b12017-02-16 16:48:49 +00001108static void emitReductionListCopy(
1109 CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
1110 ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
1111 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
Arpith Chacko Jacob101e8fb2017-02-16 16:20:16 +00001112
1113 auto &CGM = CGF.CGM;
1114 auto &C = CGM.getContext();
1115 auto &Bld = CGF.Builder;
1116
Arpith Chacko Jacobfc711b12017-02-16 16:48:49 +00001117 auto *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
1118 auto *ScratchpadIndex = CopyOptions.ScratchpadIndex;
1119 auto *ScratchpadWidth = CopyOptions.ScratchpadWidth;
1120
Arpith Chacko Jacob101e8fb2017-02-16 16:20:16 +00001121 // Iterates, element-by-element, through the source Reduce list and
1122 // make a copy.
1123 unsigned Idx = 0;
Arpith Chacko Jacobfc711b12017-02-16 16:48:49 +00001124 unsigned Size = Privates.size();
  for (auto &Private : Privates) {
    Address SrcElementAddr = Address::invalid();
    Address DestElementAddr = Address::invalid();
    Address DestElementPtrAddr = Address::invalid();
    // Should we shuffle in an element from a remote lane?
    bool ShuffleInElement = false;
    // Set to true to update the pointer in the dest Reduce list to a
    // newly created element.
    bool UpdateDestListPtr = false;
    // Increment the src or dest pointer to the scratchpad, for each
    // new element.
    bool IncrScratchpadSrc = false;
    bool IncrScratchpadDest = false;

    switch (Action) {
    case RemoteLaneToThread: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr =
          Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
      llvm::Value *SrcElementPtrPtr = CGF.EmitLoadOfScalar(
          SrcElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
      SrcElementAddr =
          Address(SrcElementPtrPtr, C.getTypeAlignInChars(Private->getType()));

      // Step 1.2: Create a temporary to store the element in the destination
      // Reduce list.
      DestElementPtrAddr =
          Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
      DestElementAddr =
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
      ShuffleInElement = true;
      UpdateDestListPtr = true;
      break;
    }
    case ThreadCopy: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr =
          Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
      llvm::Value *SrcElementPtrPtr = CGF.EmitLoadOfScalar(
          SrcElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
      SrcElementAddr =
          Address(SrcElementPtrPtr, C.getTypeAlignInChars(Private->getType()));

      // Step 1.2: Get the address for dest element. The destination
      // element has already been created on the thread's stack.
      DestElementPtrAddr =
          Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
      llvm::Value *DestElementPtr =
          CGF.EmitLoadOfScalar(DestElementPtrAddr, /*Volatile=*/false,
                               C.VoidPtrTy, SourceLocation());
      Address DestElemAddr =
          Address(DestElementPtr, C.getTypeAlignInChars(Private->getType()));
      DestElementAddr = Bld.CreateElementBitCast(
          DestElemAddr, CGF.ConvertTypeForMem(Private->getType()));
      break;
    }
    case ThreadToScratchpad: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr =
          Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
      llvm::Value *SrcElementPtrPtr = CGF.EmitLoadOfScalar(
          SrcElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
      SrcElementAddr =
          Address(SrcElementPtrPtr, C.getTypeAlignInChars(Private->getType()));

      // Step 1.2: Get the address for dest element:
      // address = base + index * ElementSizeInChars.
      unsigned ElementSizeInChars =
          C.getTypeSizeInChars(Private->getType()).getQuantity();
      auto *CurrentOffset =
          Bld.CreateMul(llvm::ConstantInt::get(CGM.SizeTy, ElementSizeInChars),
                        ScratchpadIndex);
      auto *ScratchPadElemAbsolutePtrVal =
          Bld.CreateAdd(DestBase.getPointer(), CurrentOffset);
      ScratchPadElemAbsolutePtrVal =
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
      Address ScratchpadPtr =
          Address(ScratchPadElemAbsolutePtrVal,
                  C.getTypeAlignInChars(Private->getType()));
      DestElementAddr = Bld.CreateElementBitCast(
          ScratchpadPtr, CGF.ConvertTypeForMem(Private->getType()));
      IncrScratchpadDest = true;
      break;
    }
    case ScratchpadToThread: {
      // Step 1.1: Get the address for the src element in the scratchpad.
      // address = base + index * ElementSizeInChars.
      unsigned ElementSizeInChars =
          C.getTypeSizeInChars(Private->getType()).getQuantity();
      auto *CurrentOffset =
          Bld.CreateMul(llvm::ConstantInt::get(CGM.SizeTy, ElementSizeInChars),
                        ScratchpadIndex);
      auto *ScratchPadElemAbsolutePtrVal =
          Bld.CreateAdd(SrcBase.getPointer(), CurrentOffset);
      ScratchPadElemAbsolutePtrVal =
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
      SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
                               C.getTypeAlignInChars(Private->getType()));
      IncrScratchpadSrc = true;

      // Step 1.2: Create a temporary to store the element in the destination
      // Reduce list.
      DestElementPtrAddr =
          Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
      DestElementAddr =
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
      UpdateDestListPtr = true;
      break;
    }
    }

    // Regardless of the direction of the copy, we emit the load of the
    // src element here because it is required in all directions.
    SrcElementAddr = Bld.CreateElementBitCast(
        SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
    llvm::Value *Elem =
        CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
                             Private->getType(), SourceLocation());

    // Now that all active lanes have read the element in the
    // Reduce list, shuffle over the value from the remote lane.
    if (ShuffleInElement) {
      Elem = createRuntimeShuffleFunction(CGF, Private->getType(), Elem,
                                          RemoteLaneOffset);
    }

    // Store the source element value to the dest element address.
    CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
                          Private->getType());

    // Step 3.1: Modify reference in dest Reduce list as needed.
    // Modifying the reference in Reduce list to point to the newly
    // created element. The element is live in the current function
    // scope and that of functions it invokes (i.e., reduce_function).
    // RemoteReduceData[i] = (void*)&RemoteElem
    if (UpdateDestListPtr) {
      CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
                                DestElementAddr.getPointer(), CGF.VoidPtrTy),
                            DestElementPtrAddr, /*Volatile=*/false,
                            C.VoidPtrTy);
    }

    // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
    // address of the next element in scratchpad memory, unless we're currently
    // processing the last one. Memory alignment is also taken care of here.
    if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
      llvm::Value *ScratchpadBasePtr =
          IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
      unsigned ElementSizeInChars =
          C.getTypeSizeInChars(Private->getType()).getQuantity();
      ScratchpadBasePtr = Bld.CreateAdd(
          ScratchpadBasePtr,
          Bld.CreateMul(ScratchpadWidth, llvm::ConstantInt::get(
                                             CGM.SizeTy, ElementSizeInChars)));

      // Take care of global memory alignment for performance.
      ScratchpadBasePtr = Bld.CreateSub(ScratchpadBasePtr,
                                        llvm::ConstantInt::get(CGM.SizeTy, 1));
      ScratchpadBasePtr = Bld.CreateSDiv(
          ScratchpadBasePtr,
          llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
      ScratchpadBasePtr = Bld.CreateAdd(ScratchpadBasePtr,
                                        llvm::ConstantInt::get(CGM.SizeTy, 1));
      ScratchpadBasePtr = Bld.CreateMul(
          ScratchpadBasePtr,
          llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
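      // The sub/div/add/mul sequence above computes
      //   Ptr = ((Ptr - 1) / Align + 1) * Align, Align = GlobalMemoryAlignment,
      // i.e. it rounds the base pointer up to the next multiple of the
      // alignment, so each element's row of the scratchpad starts on an
      // aligned boundary.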

      if (IncrScratchpadDest)
        DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
      else /* IncrScratchpadSrc = true */
        SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
    }

    Idx++;
  }
}

/// This function emits a helper that loads data from the scratchpad array
/// and (optionally) reduces it with the input operand.
///
/// load_and_reduce(local, scratchpad, index, width, should_reduce)
///   reduce_data remote;
///   for elem in remote:
///     remote.elem = Scratchpad[elem_id][index]
///   if (should_reduce)
///     local = local @ remote
///   else
///     local = remote
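///
/// where '@' denotes the reduction operation and 'index' selects the
/// team's slot within each element's row of the scratchpad.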
static llvm::Value *
emitReduceScratchpadFunction(CodeGenModule &CGM,
                             ArrayRef<const Expr *> Privates,
                             QualType ReductionArrayTy, llvm::Value *ReduceFn) {
  auto &C = CGM.getContext();
  auto Int32Ty = C.getIntTypeForBitwidth(32, /* Signed */ true);

  // Destination of the copy.
  ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
  // Base address of the scratchpad array, with each element storing a
  // Reduce list per team.
  ImplicitParamDecl ScratchPadArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
  // A source index into the scratchpad array.
  ImplicitParamDecl IndexArg(C, Int32Ty, ImplicitParamDecl::Other);
  // Row width of an element in the scratchpad array, typically
  // the number of teams.
  ImplicitParamDecl WidthArg(C, Int32Ty, ImplicitParamDecl::Other);
  // If should_reduce == 1, then it's load AND reduce,
  // If should_reduce == 0 (or otherwise), then it only loads (+ copy).
  // The latter case is used for initialization.
  ImplicitParamDecl ShouldReduceArg(C, Int32Ty, ImplicitParamDecl::Other);

  FunctionArgList Args;
  Args.push_back(&ReduceListArg);
  Args.push_back(&ScratchPadArg);
  Args.push_back(&IndexArg);
  Args.push_back(&WidthArg);
  Args.push_back(&ShouldReduceArg);

  auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      "_omp_reduction_load_and_reduce", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*DC=*/nullptr, Fn, CGFI);
  CodeGenFunction CGF(CGM);
  // We don't need debug information in this function as nothing here refers to
  // user code.
  CGF.disableDebugInfo();
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);

  auto &Bld = CGF.Builder;

  // Get local Reduce list pointer.
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
  Address ReduceListAddr(
      Bld.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
                               C.VoidPtrTy, SourceLocation()),
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
      CGF.getPointerAlign());

  Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
  llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
      AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());

  Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
  llvm::Value *IndexVal =
      Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false,
                                             Int32Ty, SourceLocation()),
                        CGM.SizeTy, /*isSigned=*/true);

  Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
  llvm::Value *WidthVal =
      Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false,
                                             Int32Ty, SourceLocation()),
                        CGM.SizeTy, /*isSigned=*/true);

  Address AddrShouldReduceArg = CGF.GetAddrOfLocalVar(&ShouldReduceArg);
  llvm::Value *ShouldReduceVal = CGF.EmitLoadOfScalar(
      AddrShouldReduceArg, /*Volatile=*/false, Int32Ty, SourceLocation());

  // The absolute ptr address to the base addr of the next element to copy.
  llvm::Value *CumulativeElemBasePtr =
      Bld.CreatePtrToInt(ScratchPadBase, CGM.SizeTy);
  Address SrcDataAddr(CumulativeElemBasePtr, CGF.getPointerAlign());

  // Create a Remote Reduce list to store the elements read from the
  // scratchpad array.
  Address RemoteReduceList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_red_list");

  // Assemble remote Reduce list from scratchpad array.
  emitReductionListCopy(ScratchpadToThread, CGF, ReductionArrayTy, Privates,
                        SrcDataAddr, RemoteReduceList,
                        {/*RemoteLaneOffset=*/nullptr,
                         /*ScratchpadIndex=*/IndexVal,
                         /*ScratchpadWidth=*/WidthVal});

  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");

  auto CondReduce = Bld.CreateICmpEQ(ShouldReduceVal, Bld.getInt32(1));
  Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);

  CGF.EmitBlock(ThenBB);
  // We should reduce with the local Reduce list.
  // reduce_function(LocalReduceList, RemoteReduceList)
  llvm::Value *LocalDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
      ReduceListAddr.getPointer(), CGF.VoidPtrTy);
  llvm::Value *RemoteDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
      RemoteReduceList.getPointer(), CGF.VoidPtrTy);
  CGF.EmitCallOrInvoke(ReduceFn, {LocalDataPtr, RemoteDataPtr});
  Bld.CreateBr(MergeBB);

  CGF.EmitBlock(ElseBB);
  // No reduction; just copy:
  // Local Reduce list = Remote Reduce list.
  emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
                        RemoteReduceList, ReduceListAddr);
  Bld.CreateBr(MergeBB);

  CGF.EmitBlock(MergeBB);

  CGF.FinishFunction();
  return Fn;
}

/// This function emits a helper that stores reduced data from the team
/// master to a scratchpad array in global memory.
///
/// for elem in Reduce List:
///   scratchpad[elem_id][index] = elem
///
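/// The scratchpad is laid out one row per reduce element: each row holds
/// 'width' slots, one per team, and rows are padded out to
/// GlobalMemoryAlignment by the copy routine above.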
static llvm::Value *emitCopyToScratchpad(CodeGenModule &CGM,
                                         ArrayRef<const Expr *> Privates,
                                         QualType ReductionArrayTy) {

  auto &C = CGM.getContext();
  auto Int32Ty = C.getIntTypeForBitwidth(32, /* Signed */ true);

  // Source of the copy.
  ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
  // Base address of the scratchpad array, with each element storing a
  // Reduce list per team.
  ImplicitParamDecl ScratchPadArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
  // A destination index into the scratchpad array, typically the team
  // identifier.
  ImplicitParamDecl IndexArg(C, Int32Ty, ImplicitParamDecl::Other);
  // Row width of an element in the scratchpad array, typically
  // the number of teams.
  ImplicitParamDecl WidthArg(C, Int32Ty, ImplicitParamDecl::Other);

  FunctionArgList Args;
  Args.push_back(&ReduceListArg);
  Args.push_back(&ScratchPadArg);
  Args.push_back(&IndexArg);
  Args.push_back(&WidthArg);

  auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      "_omp_reduction_copy_to_scratchpad", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*DC=*/nullptr, Fn, CGFI);
  CodeGenFunction CGF(CGM);
  // We don't need debug information in this function as nothing here refers to
  // user code.
  CGF.disableDebugInfo();
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);

  auto &Bld = CGF.Builder;

  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
  Address SrcDataAddr(
      Bld.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
                               C.VoidPtrTy, SourceLocation()),
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
      CGF.getPointerAlign());

  Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
  llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
      AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());

  Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
  llvm::Value *IndexVal =
      Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false,
                                             Int32Ty, SourceLocation()),
                        CGF.SizeTy, /*isSigned=*/true);

  Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
  llvm::Value *WidthVal =
      Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false,
                                             Int32Ty, SourceLocation()),
                        CGF.SizeTy, /*isSigned=*/true);

  // The absolute ptr address to the base addr of the next element to copy.
  llvm::Value *CumulativeElemBasePtr =
      Bld.CreatePtrToInt(ScratchPadBase, CGM.SizeTy);
  Address DestDataAddr(CumulativeElemBasePtr, CGF.getPointerAlign());

  emitReductionListCopy(ThreadToScratchpad, CGF, ReductionArrayTy, Privates,
                        SrcDataAddr, DestDataAddr,
                        {/*RemoteLaneOffset=*/nullptr,
                         /*ScratchpadIndex=*/IndexVal,
                         /*ScratchpadWidth=*/WidthVal});

  CGF.FinishFunction();
  return Fn;
}

/// This function emits a helper that gathers Reduce lists from the first
/// lane of every active warp to lanes in the first warp.
///
/// void inter_warp_copy_func(void* reduce_data, num_warps)
///   shared smem[warp_size];
///   For all data entries D in reduce_data:
///     If (I am the first lane in each warp)
///       Copy my local D to smem[warp_id]
///     sync
///     if (I am the first warp)
///       Copy smem[thread_id] to my local D
///     sync
static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
                                              ArrayRef<const Expr *> Privates,
                                              QualType ReductionArrayTy) {
  auto &C = CGM.getContext();
  auto &M = CGM.getModule();

  // ReduceList: thread local Reduce list.
  // At the stage of the computation when this function is called, partially
  // aggregated values reside in the first lane of every active warp.
  ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
  // NumWarps: number of warps active in the parallel region. This could
  // be smaller than 32 (max warps in a CTA) for partial block reduction.
  ImplicitParamDecl NumWarpsArg(C,
                                C.getIntTypeForBitwidth(32, /* Signed */ true),
                                ImplicitParamDecl::Other);
  FunctionArgList Args;
  Args.push_back(&ReduceListArg);
  Args.push_back(&NumWarpsArg);

  auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      "_omp_reduction_inter_warp_copy_func", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*DC=*/nullptr, Fn, CGFI);
  CodeGenFunction CGF(CGM);
  // We don't need debug information in this function as nothing here refers to
  // user code.
  CGF.disableDebugInfo();
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);

  auto &Bld = CGF.Builder;

  // This array is used as a medium to transfer, one reduce element at a time,
  // the data from the first lane of every warp to lanes in the first warp
  // in order to perform the final step of a reduction in a parallel region
  // (reduction across warps). The array is placed in NVPTX __shared__ memory
  // for reduced latency, as well as to have a distinct copy for concurrently
  // executing target regions. The array is declared with common linkage so
  // as to be shared across compilation units.
  const char *TransferMediumName =
      "__openmp_nvptx_data_transfer_temporary_storage";
  llvm::GlobalVariable *TransferMedium =
      M.getGlobalVariable(TransferMediumName);
  if (!TransferMedium) {
    auto *Ty = llvm::ArrayType::get(CGM.Int64Ty, WarpSize);
    unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
    TransferMedium = new llvm::GlobalVariable(
        M, Ty,
        /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
        llvm::Constant::getNullValue(Ty), TransferMediumName,
        /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
        SharedAddressSpace);
  }
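  // Note that each slot of the medium is an i64, wide enough that a
  // primitive reduce element of up to eight bytes fits in a single slot.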

  // Get the CUDA thread id of the current OpenMP thread on the GPU.
  auto *ThreadID = getNVPTXThreadID(CGF);
  // nvptx_lane_id = nvptx_id % warpsize
  auto *LaneID = getNVPTXLaneID(CGF);
  // nvptx_warp_id = nvptx_id / warpsize
  auto *WarpID = getNVPTXWarpID(CGF);

  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
  Address LocalReduceList(
      Bld.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
                               C.VoidPtrTy, SourceLocation()),
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
      CGF.getPointerAlign());

  unsigned Idx = 0;
  for (auto &Private : Privates) {
    //
    // Warp master copies reduce element to transfer medium in __shared__
    // memory.
    //
    llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
    llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
    llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");

    // if (lane_id == 0)
    auto IsWarpMaster =
        Bld.CreateICmpEQ(LaneID, Bld.getInt32(0), "warp_master");
    Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
    CGF.EmitBlock(ThenBB);

    // Reduce element = LocalReduceList[i]
    Address ElemPtrPtrAddr =
        Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
    llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
        ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
    // elemptr = (type[i]*)(elemptrptr)
    Address ElemPtr =
        Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
    ElemPtr = Bld.CreateElementBitCast(
        ElemPtr, CGF.ConvertTypeForMem(Private->getType()));
    // elem = *elemptr
    llvm::Value *Elem = CGF.EmitLoadOfScalar(
        ElemPtr, /*Volatile=*/false, Private->getType(), SourceLocation());

    // Get pointer to location in transfer medium.
    // MediumPtr = &medium[warp_id]
    llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
        TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
    Address MediumPtr(MediumPtrVal, C.getTypeAlignInChars(Private->getType()));
    // Casting to actual data type.
    // MediumPtr = (type[i]*)MediumPtrAddr;
    MediumPtr = Bld.CreateElementBitCast(
        MediumPtr, CGF.ConvertTypeForMem(Private->getType()));

    // *MediumPtr = elem
    Bld.CreateStore(Elem, MediumPtr);

    Bld.CreateBr(MergeBB);

    CGF.EmitBlock(ElseBB);
    Bld.CreateBr(MergeBB);

    CGF.EmitBlock(MergeBB);

    Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
    llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
        AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, SourceLocation());

    auto *NumActiveThreads = Bld.CreateNSWMul(
        NumWarpsVal, getNVPTXWarpSize(CGF), "num_active_threads");
    // named_barrier_sync(ParallelBarrierID, num_active_threads)
    syncParallelThreads(CGF, NumActiveThreads);

    //
    // Warp 0 copies reduce element from transfer medium.
    //
    llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
    llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
    llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");

    // Up to 32 threads in warp 0 are active.
    auto IsActiveThread =
        Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
    Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);

    CGF.EmitBlock(W0ThenBB);

    // SrcMediumPtr = &medium[tid]
    llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
        TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
    Address SrcMediumPtr(SrcMediumPtrVal,
                         C.getTypeAlignInChars(Private->getType()));
    // SrcMediumVal = *SrcMediumPtr;
    SrcMediumPtr = Bld.CreateElementBitCast(
        SrcMediumPtr, CGF.ConvertTypeForMem(Private->getType()));
    llvm::Value *SrcMediumValue = CGF.EmitLoadOfScalar(
        SrcMediumPtr, /*Volatile=*/false, Private->getType(), SourceLocation());

    // TargetElemPtr = (type[i]*)(SrcDataAddr[i])
    Address TargetElemPtrPtr =
        Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
    llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
        TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
    Address TargetElemPtr =
        Address(TargetElemPtrVal, C.getTypeAlignInChars(Private->getType()));
    TargetElemPtr = Bld.CreateElementBitCast(
        TargetElemPtr, CGF.ConvertTypeForMem(Private->getType()));

    // *TargetElemPtr = SrcMediumVal;
    CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
                          Private->getType());
    Bld.CreateBr(W0MergeBB);

    CGF.EmitBlock(W0ElseBB);
    Bld.CreateBr(W0MergeBB);

    CGF.EmitBlock(W0MergeBB);

    // While warp 0 copies values from transfer medium, all other warps must
    // wait.
    syncParallelThreads(CGF, NumActiveThreads);
    Idx++;
  }

  CGF.FinishFunction();
  return Fn;
}

/// Emit a helper that reduces data across two OpenMP threads (lanes)
/// in the same warp. It uses shuffle instructions to copy over data from
/// a remote lane's stack. The reduction algorithm performed is specified
/// by the fourth parameter.
///
/// Algorithm Versions.
/// Full Warp Reduce (argument value 0):
///   This algorithm assumes that all 32 lanes are active and gathers
///   data from these 32 lanes, producing a single resultant value.
/// Contiguous Partial Warp Reduce (argument value 1):
///   This algorithm assumes that only a *contiguous* subset of lanes
///   are active. This happens for the last warp in a parallel region
///   when the user-specified num_threads is not an integer multiple of
///   32. This contiguous subset always starts with the zeroth lane.
/// Partial Warp Reduce (argument value 2):
///   This algorithm gathers data from any number of lanes at any position.
/// All reduced values are stored in the lowest possible lane. The set
/// of problems every algorithm addresses is a superset of those
/// addressable by algorithms with a lower version number. Overhead
/// increases as algorithm version increases.
///
/// Terminology
/// Reduce element:
///   Reduce element refers to the individual data field with primitive
///   data types to be combined and reduced across threads.
/// Reduce list:
///   Reduce list refers to a collection of local, thread-private
///   reduce elements.
/// Remote Reduce list:
///   Remote Reduce list refers to a collection of remote (relative to
///   the current thread) reduce elements.
///
/// We distinguish between three states of threads that are important to
/// the implementation of this function.
/// Alive threads:
///   Threads in a warp executing the SIMT instruction, as distinguished from
///   threads that are inactive due to divergent control flow.
/// Active threads:
///   The minimal set of threads that has to be alive upon entry to this
///   function. The computation is correct iff active threads are alive.
///   Some threads are alive but they are not active because they do not
///   contribute to the computation in any useful manner. Turning them off
///   may introduce control flow overheads without any tangible benefits.
/// Effective threads:
///   In order to comply with the argument requirements of the shuffle
///   function, we must keep all lanes holding data alive. But at most
///   half of them perform value aggregation; we refer to this half of
///   threads as effective. The other half is simply handing off its
///   data.
///
/// Procedure
/// Value shuffle:
///   In this step active threads transfer data from higher lane positions
///   in the warp to lower lane positions, creating the Remote Reduce list.
/// Value aggregation:
///   In this step, effective threads combine their thread local Reduce list
///   with the Remote Reduce list and store the result in the thread local
///   Reduce list.
/// Value copy:
///   In this step, we deal with the assumption made by algorithm 2
///   (i.e. the contiguity assumption). When we have an odd number of lanes
///   active, say 2k+1, only k threads will be effective and therefore k
///   new values will be produced. However, the Reduce list owned by the
///   (2k+1)th thread is ignored in the value aggregation. Therefore
///   we copy the Reduce list from the (2k+1)th lane to the (k+1)th lane so
///   that the contiguity assumption still holds.
static llvm::Value *
emitShuffleAndReduceFunction(CodeGenModule &CGM,
                             ArrayRef<const Expr *> Privates,
                             QualType ReductionArrayTy, llvm::Value *ReduceFn) {
  auto &C = CGM.getContext();

  // Thread local Reduce list used to host the values of data to be reduced.
  ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
  // Current lane id; could be logical.
  ImplicitParamDecl LaneIDArg(C, C.ShortTy, ImplicitParamDecl::Other);
  // Offset of the remote source lane relative to the current lane.
  ImplicitParamDecl RemoteLaneOffsetArg(C, C.ShortTy,
                                        ImplicitParamDecl::Other);
  // Algorithm version. This is expected to be known at compile time.
  ImplicitParamDecl AlgoVerArg(C, C.ShortTy, ImplicitParamDecl::Other);
  FunctionArgList Args;
  Args.push_back(&ReduceListArg);
  Args.push_back(&LaneIDArg);
  Args.push_back(&RemoteLaneOffsetArg);
  Args.push_back(&AlgoVerArg);

  auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
  CodeGenFunction CGF(CGM);
  // We don't need debug information in this function as nothing here refers to
  // user code.
  CGF.disableDebugInfo();
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);

  auto &Bld = CGF.Builder;

  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
  Address LocalReduceList(
      Bld.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
                               C.VoidPtrTy, SourceLocation()),
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
      CGF.getPointerAlign());

  Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
  llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
      AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());

  Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
  llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
      AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());

  Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
  llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
      AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());

  // Create a local thread-private variable to host the Reduce list
  // from a remote lane.
  Address RemoteReduceList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");

  // This loop iterates through the list of reduce elements and copies,
  // element by element, from a remote lane in the warp to RemoteReduceList,
  // hosted on the thread's stack.
  emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
                        LocalReduceList, RemoteReduceList,
                        {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
                         /*ScratchpadIndex=*/nullptr,
                         /*ScratchpadWidth=*/nullptr});

  // The actions to be performed on the Remote Reduce list are dependent
  // on the algorithm version.
  //
  //  if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
  //  LaneId % 2 == 0 && Offset > 0):
  //    do the reduction value aggregation
  //
  // The thread local variable Reduce list is mutated in place to host the
  // reduced data, which is the aggregated value produced from local and
  // remote lanes.
  //
  // Note that AlgoVer is expected to be a constant integer known at compile
  // time.
  // When AlgoVer==0, the first conjunction evaluates to true, making
  // the entire predicate true during compile time.
  // When AlgoVer==1, the second conjunction has only the second part to be
  // evaluated during runtime. Other conjunctions evaluate to false
  // during compile time.
  // When AlgoVer==2, the third conjunction has only the second part to be
  // evaluated during runtime. Other conjunctions evaluate to false
  // during compile time.
  auto CondAlgo0 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(0));

  auto Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
  auto CondAlgo1 = Bld.CreateAnd(
      Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));

  auto Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
  auto CondAlgo2 = Bld.CreateAnd(
      Algo2,
      Bld.CreateICmpEQ(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1)),
                       Bld.getInt16(0)));
  CondAlgo2 = Bld.CreateAnd(
      CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));

  auto CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
  CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);

  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
  Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);

  CGF.EmitBlock(ThenBB);
  // reduce_function(LocalReduceList, RemoteReduceList)
  llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
      LocalReduceList.getPointer(), CGF.VoidPtrTy);
  llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
      RemoteReduceList.getPointer(), CGF.VoidPtrTy);
  CGF.EmitCallOrInvoke(ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
  Bld.CreateBr(MergeBB);

  CGF.EmitBlock(ElseBB);
  Bld.CreateBr(MergeBB);

  CGF.EmitBlock(MergeBB);

  // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
  // Reduce list.
  Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
  auto CondCopy = Bld.CreateAnd(
      Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));

  llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
  llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
  llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
  Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);

  CGF.EmitBlock(CpyThenBB);
  emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
                        RemoteReduceList, LocalReduceList);
  Bld.CreateBr(CpyMergeBB);

  CGF.EmitBlock(CpyElseBB);
  Bld.CreateBr(CpyMergeBB);

  CGF.EmitBlock(CpyMergeBB);

  CGF.FinishFunction();
  return Fn;
}

///
/// Design of OpenMP reductions on the GPU
///
/// Consider a typical OpenMP program with one or more reduction
/// clauses:
///
/// float foo;
/// double bar;
/// #pragma omp target teams distribute parallel for \
///             reduction(+:foo) reduction(*:bar)
/// for (int i = 0; i < N; i++) {
///   foo += A[i]; bar *= B[i];
/// }
///
/// where 'foo' and 'bar' are reduced across all OpenMP threads in
/// all teams. In our OpenMP implementation on the NVPTX device an
/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
/// within a team are mapped to CUDA threads within a threadblock.
/// Our goal is to efficiently aggregate values across all OpenMP
/// threads such that:
///
///   - the compiler and runtime are logically concise, and
///   - the reduction is performed efficiently in a hierarchical
///     manner as follows: within OpenMP threads in the same warp,
///     across warps in a threadblock, and finally across teams on
///     the NVPTX device.
///
/// Introduction to Decoupling
///
/// We would like to decouple the compiler and the runtime so that the
/// latter is ignorant of the reduction variables (number, data types)
/// and the reduction operators. This allows a simpler interface
/// and implementation while still attaining good performance.
///
/// Pseudocode for the aforementioned OpenMP program generated by the
/// compiler is as follows:
///
/// 1. Create private copies of reduction variables on each OpenMP
///    thread: 'foo_private', 'bar_private'
/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
///    to it and writes the result in 'foo_private' and 'bar_private'
///    respectively.
/// 3. Call the OpenMP runtime on the GPU to reduce within a team
///    and store the result on the team master:
///
///    __kmpc_nvptx_parallel_reduce_nowait(...,
///       reduceData, shuffleReduceFn, interWarpCpyFn)
///
///    where:
///      struct ReduceData {
///        double *foo;
///        double *bar;
///      } reduceData
///      reduceData.foo = &foo_private
///      reduceData.bar = &bar_private
///
///    'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
///    auxiliary functions generated by the compiler that operate on
///    variables of type 'ReduceData'. They help the runtime perform
///    algorithmic steps in a data agnostic manner.
///
///    'shuffleReduceFn' is a pointer to a function that reduces data
///    of type 'ReduceData' across two OpenMP threads (lanes) in the
///    same warp. It takes the following arguments as input:
///
///    a. variable of type 'ReduceData' on the calling lane,
///    b. its lane_id,
///    c. an offset relative to the current lane_id to generate a
///       remote_lane_id. The remote lane contains the second
///       variable of type 'ReduceData' that is to be reduced.
///    d. an algorithm version parameter determining which reduction
///       algorithm to use.
///
///    'shuffleReduceFn' retrieves data from the remote lane using
///    efficient GPU shuffle intrinsics and reduces, using the
///    algorithm specified by the 4th parameter, the two operands
///    element-wise. The result is written to the first operand.
///
///    Different reduction algorithms are implemented in different
///    runtime functions, all calling 'shuffleReduceFn' to perform
///    the essential reduction step. Therefore, based on the 4th
///    parameter, this function behaves slightly differently to
///    cooperate with the runtime to ensure correctness under
///    different circumstances.
///
///    'InterWarpCpyFn' is a pointer to a function that transfers
///    reduced variables across warps. It tunnels, through CUDA
///    shared memory, the thread-private data of type 'ReduceData'
///    from lane 0 of each warp to a lane in the first warp.
/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
///    The last team writes the global reduced value to memory.
///
///    ret = __kmpc_nvptx_teams_reduce_nowait(...,
///            reduceData, shuffleReduceFn, interWarpCpyFn,
///            scratchpadCopyFn, loadAndReduceFn)
///
///    'scratchpadCopyFn' is a helper that stores reduced
///    data from the team master to a scratchpad array in
///    global memory.
///
///    'loadAndReduceFn' is a helper that loads data from
///    the scratchpad array and reduces it with the input
///    operand.
///
///    These compiler generated functions hide address
///    calculation and alignment information from the runtime.
/// 5. if ret == 1:
///    The team master of the last team stores the reduced
///    result to the globals in memory.
///    foo += reduceData.foo; bar *= reduceData.bar
///
///
/// Warp Reduction Algorithms
///
/// On the warp level, we have three algorithms implemented in the
/// OpenMP runtime depending on the number of active lanes:
///
/// Full Warp Reduction
///
/// The reduce algorithm within a warp where all lanes are active
/// is implemented in the runtime as follows:
///
/// full_warp_reduce(void *reduce_data,
///                  kmp_ShuffleReductFctPtr ShuffleReduceFn) {
///   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
///     ShuffleReduceFn(reduce_data, 0, offset, 0);
/// }
///
/// The algorithm completes in log(2, WARPSIZE) steps.
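///
/// For example, with WARPSIZE == 32 the loop above runs with
/// offset = 16, 8, 4, 2, 1, i.e. five shuffle-and-reduce steps in all.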
///
/// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
/// not used; we therefore save instructions by not retrieving lane_id
/// from the corresponding special registers. The 4th parameter, which
/// represents the version of the algorithm being used, is set to 0 to
/// signify full warp reduction.
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// #reduce_elem refers to an element in the local lane's data structure
/// #remote_elem is retrieved from a remote lane
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
///
/// Contiguous Partial Warp Reduction
///
/// This reduce algorithm is used within a warp where only the first
/// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
/// number of OpenMP threads in a parallel region is not a multiple of
/// WARPSIZE. The algorithm is implemented in the runtime as follows:
///
/// void
/// contiguous_partial_reduce(void *reduce_data,
///                           kmp_ShuffleReductFctPtr ShuffleReduceFn,
///                           int size, int lane_id) {
///   int curr_size;
///   int offset;
///   curr_size = size;
///   offset = curr_size/2;
///   while (offset>0) {
///     ShuffleReduceFn(reduce_data, lane_id, offset, 1);
///     curr_size = (curr_size+1)/2;
///     offset = curr_size/2;
///   }
/// }
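///
/// For example, with 5 active lanes the loop runs with offset = 2, 1, 1:
/// the first step folds lanes 2 and 3 into lanes 0 and 1 and copies lane
/// 4's data down to lane 2; the remaining two steps fold the three
/// surviving values into lane 0.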
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// if (lane_id < offset)
///   reduce_elem = reduce_elem REDUCE_OP remote_elem
/// else
///   reduce_elem = remote_elem
///
/// This algorithm assumes that the data to be reduced are located in a
/// contiguous subset of lanes starting from the first. When there is
/// an odd number of active lanes, the data in the last lane is not
/// aggregated with any other lane's data but is instead copied over.
///
/// Dispersed Partial Warp Reduction
///
/// This algorithm is used within a warp when any discontiguous subset of
/// lanes are active. It is used to implement the reduction operation
/// across lanes in an OpenMP simd region or in a nested parallel region.
///
/// void
/// dispersed_partial_reduce(void *reduce_data,
///                          kmp_ShuffleReductFctPtr ShuffleReduceFn) {
///   int size, remote_id;
///   int logical_lane_id = number_of_active_lanes_before_me() * 2;
///   do {
///     remote_id = next_active_lane_id_right_after_me();
///     # the above function returns 0 if no active lane
///     # is present right after the current lane.
///     size = number_of_active_lanes_in_this_warp();
///     logical_lane_id /= 2;
///     ShuffleReduceFn(reduce_data, logical_lane_id,
///                     remote_id-1-threadIdx.x, 2);
///   } while (logical_lane_id % 2 == 0 && size > 1);
/// }
///
/// There is no assumption made about the initial state of the reduction.
/// Any number of lanes (>=1) could be active at any position. The reduction
/// result is returned in the first active lane.
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// if (lane_id % 2 == 0 && offset > 0)
///   reduce_elem = reduce_elem REDUCE_OP remote_elem
/// else
///   reduce_elem = remote_elem
///
///
/// Intra-Team Reduction
///
/// This function, as implemented in the runtime call
/// '__kmpc_nvptx_parallel_reduce_nowait', aggregates data across OpenMP
/// threads in a team. It first reduces within a warp using the
/// aforementioned algorithms. We then proceed to gather all such
/// reduced values at the first warp.
///
/// The runtime makes use of the function 'InterWarpCpyFn', which copies
/// data from each of the "warp masters" (zeroth lane of each warp, where
/// warp-reduced data is held) to the zeroth warp. This step reduces (in
/// a mathematical sense) the problem of reduction across warp masters in
/// a block to the problem of warp reduction.
///
///
/// Inter-Team Reduction
///
/// Once a team has reduced its data to a single value, it is stored in
/// a global scratchpad array. Since each team has a distinct slot, this
/// can be done without locking.
///
/// The last team to write to the scratchpad array proceeds to reduce the
/// scratchpad array. One or more workers in the last team use the helper
/// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
/// the k'th worker reduces every k'th element.
///
/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait' to
/// reduce across workers and compute a globally reduced value.
///
void CGOpenMPRuntimeNVPTX::emitReduction(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
    ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
  if (!CGF.HaveInsertPoint())
    return;

  bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
  bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
  // FIXME: Add support for simd reduction.
  assert((TeamsReduction || ParallelReduction) &&
         "Invalid reduction selection in emitReduction.");

  auto &C = CGM.getContext();

  // 1. Build a list of reduction variables.
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
  auto Size = RHSExprs.size();
  for (auto *E : Privates) {
    if (E->getType()->isVariablyModifiedType())
      // Reserve place for array size.
      ++Size;
  }
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
  QualType ReductionArrayTy =
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
                             /*IndexTypeQuals=*/0);
  Address ReductionList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
                                                   CGF.getPointerSize());
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
        Elem);
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
      // Store array size.
      ++Idx;
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
                                             CGF.getPointerSize());
      llvm::Value *Size = CGF.Builder.CreateIntCast(
          CGF.getVLASize(
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
              .first,
          CGF.SizeTy, /*isSigned=*/false);
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
                              Elem);
    }
  }

  // 2. Emit reduce_func().
  auto *ReductionFn = emitReductionFunction(
      CGM, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
      LHSExprs, RHSExprs, ReductionOps);

  // 3. Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
  // RedList, shuffle_reduce_func, interwarp_copy_func);
  auto *ThreadId = getThreadID(CGF, Loc);
  auto *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
  auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      ReductionList.getPointer(), CGF.VoidPtrTy);

  auto *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
      CGM, Privates, ReductionArrayTy, ReductionFn);
  auto *InterWarpCopyFn =
      emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy);

  llvm::Value *Res = nullptr;
  if (ParallelReduction) {
    llvm::Value *Args[] = {ThreadId,
                           CGF.Builder.getInt32(RHSExprs.size()),
                           ReductionArrayTySize,
                           RL,
                           ShuffleAndReduceFn,
                           InterWarpCopyFn};

    Res = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_reduce_nowait),
        Args);
  }

  if (TeamsReduction) {
    auto *ScratchPadCopyFn =
        emitCopyToScratchpad(CGM, Privates, ReductionArrayTy);
    auto *LoadAndReduceFn = emitReduceScratchpadFunction(
        CGM, Privates, ReductionArrayTy, ReductionFn);

    llvm::Value *Args[] = {ThreadId,
                           CGF.Builder.getInt32(RHSExprs.size()),
                           ReductionArrayTySize,
                           RL,
                           ShuffleAndReduceFn,
                           InterWarpCopyFn,
                           ScratchPadCopyFn,
                           LoadAndReduceFn};
    Res = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_teams_reduce_nowait),
        Args);
  }

  // 4. Build switch(res)
  auto *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
  auto *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/1);

  // 5. Build case 1: where we have reduced values in the master
  // thread in each team.
  // __kmpc_end_reduce{_nowait}(<gtid>);
  // break;
  auto *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
  SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
  CGF.EmitBlock(Case1BB);

  // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
  llvm::Value *EndArgs[] = {ThreadId};
  auto &&CodeGen = [&Privates, &LHSExprs, &RHSExprs, &ReductionOps,
                    this](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (auto *E : ReductionOps) {
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                  cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
  };
  RegionCodeGenTy RCG(CodeGen);
  NVPTXActionTy Action(
      nullptr, llvm::None,
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
      EndArgs);
  RCG.setAction(Action);
  RCG(CGF);
  CGF.EmitBranch(DefaultBB);
  CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
}

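/// Translate a reference parameter of the outlined function into a
/// restrict-qualified pointer: map-captured pointees are placed in the
/// global address space, and the pointer itself is marked with the NVPTX
/// local address space. Non-reference parameters are returned unchanged.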
const VarDecl *
CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
                                         const VarDecl *NativeParam) const {
  if (!NativeParam->getType()->isReferenceType())
    return NativeParam;
  QualType ArgType = NativeParam->getType();
  QualifierCollector QC;
  const Type *NonQualTy = QC.strip(ArgType);
  QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
  if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
    if (Attr->getCaptureKind() == OMPC_map) {
      PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
                                                        LangAS::opencl_global);
    }
  }
  ArgType = CGM.getContext().getPointerType(PointeeTy);
  QC.addRestrict();
  enum { NVPTX_local_addr = 5 };
  QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
  ArgType = QC.apply(CGM.getContext(), ArgType);
  if (isa<ImplicitParamDecl>(NativeParam)) {
    return ImplicitParamDecl::Create(
        CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
        NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
  }
  return ParmVarDecl::Create(
      CGM.getContext(),
      const_cast<DeclContext *>(NativeParam->getDeclContext()),
      NativeParam->getLocStart(), NativeParam->getLocation(),
      NativeParam->getIdentifier(), ArgType,
      /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
}

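/// Compute the address to use for a native reference parameter: load the
/// pointer held by the target parameter, cast it from the generic address
/// space to the native pointee address space, and return a temporary of
/// the native parameter's type holding that pointer.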
Address
CGOpenMPRuntimeNVPTX::getParameterAddress(CodeGenFunction &CGF,
                                          const VarDecl *NativeParam,
                                          const VarDecl *TargetParam) const {
  assert(NativeParam != TargetParam &&
         NativeParam->getType()->isReferenceType() &&
         "Native arg must not be the same as target arg.");
  Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
  QualType NativeParamType = NativeParam->getType();
  QualifierCollector QC;
  const Type *NonQualTy = QC.strip(NativeParamType);
  QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
  unsigned NativePointeeAddrSpace =
      CGF.getContext().getTargetAddressSpace(NativePointeeTy);
  QualType TargetTy = TargetParam->getType();
  llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
      LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
  // First cast to generic.
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
                      /*AddrSpace=*/0));
  // Cast from generic to native address space.
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
                      NativePointeeAddrSpace));
  Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
  CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
                        NativeParamType);
  return NativeParamAddr;
}

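/// Emit a call to the outlined function, casting every pointer argument
/// through the generic address space to the exact parameter type expected
/// by the callee; trailing variadic arguments are forwarded unchanged.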
void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> Args) const {
  SmallVector<llvm::Value *, 4> TargetArgs;
  TargetArgs.reserve(Args.size());
  auto *FnType =
      cast<llvm::FunctionType>(OutlinedFn->getType()->getPointerElementType());
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    if (FnType->isVarArg() && FnType->getNumParams() <= I) {
      TargetArgs.append(std::next(Args.begin(), I), Args.end());
      break;
    }
    llvm::Type *TargetType = FnType->getParamType(I);
    llvm::Value *NativeArg = Args[I];
    if (!TargetType->isPointerTy()) {
      TargetArgs.emplace_back(NativeArg);
      continue;
    }
    llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        NativeArg, NativeArg->getType()->getPointerElementType()->getPointerTo(
                       /*AddrSpace=*/0));
    TargetArgs.emplace_back(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
  }
  CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
}

/// Emit function which wraps the outlined parallel region
/// and controls the arguments which are passed to this function.
/// The wrapper ensures that the outlined function is called
/// with the correct arguments when data is shared.
llvm::Function *CGOpenMPRuntimeNVPTX::createDataSharingWrapper(
    llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
  ASTContext &Ctx = CGM.getContext();
  const auto &CS = *cast<CapturedStmt>(D.getAssociatedStmt());

  // Create a function that takes as argument the source thread.
  FunctionArgList WrapperArgs;
  QualType Int16QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
  QualType Int32QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
  QualType Int32PtrQTy = Ctx.getPointerType(Int32QTy);
  QualType VoidPtrPtrQTy = Ctx.getPointerType(Ctx.VoidPtrTy);
  ImplicitParamDecl ParallelLevelArg(Ctx, Int16QTy, ImplicitParamDecl::Other);
  ImplicitParamDecl WrapperArg(Ctx, Int32QTy, ImplicitParamDecl::Other);
  ImplicitParamDecl SharedArgsList(Ctx, VoidPtrPtrQTy,
                                   ImplicitParamDecl::Other);
  WrapperArgs.emplace_back(&ParallelLevelArg);
  WrapperArgs.emplace_back(&WrapperArg);
  WrapperArgs.emplace_back(&SharedArgsList);

  auto &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);

  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      OutlinedParallelFn->getName() + "_wrapper", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs);

  const auto *RD = CS.getCapturedRecordDecl();
  auto CurField = RD->field_begin();

  // Get the array of arguments.
  SmallVector<llvm::Value *, 8> Args;

  // TODO: support SIMD and pass actual values
  Args.emplace_back(llvm::ConstantPointerNull::get(
      CGM.Int32Ty->getPointerTo()));
  Args.emplace_back(llvm::ConstantPointerNull::get(
      CGM.Int32Ty->getPointerTo()));
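  // The two nulls above stand in for the global and bound thread id
  // arguments (kmp_int32 *) that every outlined parallel function takes as
  // its first two parameters.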

  CGBuilderTy &Bld = CGF.Builder;
  auto CI = CS.capture_begin();

  // Load the start of the array.
  auto SharedArgs =
      CGF.EmitLoadOfPointer(CGF.GetAddrOfLocalVar(&SharedArgsList),
                            VoidPtrPtrQTy->castAs<PointerType>());

  // For each captured variable:
  for (unsigned I = 0; I < CS.capture_size(); ++I, ++CI, ++CurField) {
    // Name of captured variable.
    StringRef Name;
    if (CI->capturesThis())
      Name = "this";
    else
      Name = CI->getCapturedVar()->getName();

    // We retrieve the CLANG type of the argument. We use it to create
    // an alloca which will give us the LLVM type.
    QualType ElemTy = CurField->getType();
    // If this is a capture by copy the element type has to be the pointer to
    // the data.
    if (CI->capturesVariableByCopy())
      ElemTy = Ctx.getPointerType(ElemTy);

    // Get shared address of the captured variable.
    Address ArgAddress = Bld.CreateConstInBoundsGEP(
        SharedArgs, I, CGF.getPointerSize());
    Address TypedArgAddress = Bld.CreateBitCast(
        ArgAddress, CGF.ConvertTypeForMem(Ctx.getPointerType(ElemTy)));
    llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedArgAddress,
        /*Volatile=*/false, Int32PtrQTy, SourceLocation());
    Args.emplace_back(Arg);
  }

  emitCall(CGF, OutlinedParallelFn, Args);
  CGF.FinishFunction();
  return Fn;
}