blob: ed3c88b577165d7ecdfe774a3e70067ca0f3aa26 [file] [log] [blame]
Gheorghe-Teodor Bercead3dcf2f2018-03-14 14:17:45 +00001// Test device global memory data sharing codegen.
2///==========================================================================///
3
4// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
5// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CK1
6
7// expected-no-diagnostics
8
9#ifndef HEADER
10#define HEADER
11
// Test input for device global-memory data sharing.
// Variables 'a' and 'b' escape into parallel regions, so the NVPTX codegen
// must place them in the team-shared "globalized locals" record (checked
// below via %struct._globalized_locals_ty). Variable 'c' is private in its
// parallel region, so it must remain a plain alloca — no
// __kmpc_data_sharing_push_stack/pop_stack calls (checked by the CK1-NOT
// lines for the second outlined function).
void test_ds(){
  #pragma omp target
  {
    // 'a' is written inside the first parallel region -> must be shared
    // with the workers (globalized and passed via the sharing-args array).
    int a = 10;
    #pragma omp parallel
    {
      a = 1000;
    }
    // 'b' is written inside the second parallel region -> globalized too.
    int b = 100;
    // 'c' is listed in private(c); taking its address inside the region
    // must NOT force it onto the data-sharing stack.
    int c = 1000;
    #pragma omp parallel private(c)
    {
      int *c1 = &c;
      b = a + 10000;
    }
  }
}
Alexey Bataeve4090182018-11-02 14:54:07 +000029// CK1: [[MEM_TY:%.+]] = type { [8 x i8] }
Alexey Bataevf2f39be2018-11-16 19:38:21 +000030// CK1-DAG: [[SHARED_GLOBAL_RD:@.+]] = common addrspace(3) global [[MEM_TY]] zeroinitializer
Alexey Bataeve4090182018-11-02 14:54:07 +000031// CK1-DAG: [[KERNEL_PTR:@.+]] = internal addrspace(3) global i8* null
32// CK1-DAG: [[KERNEL_SIZE:@.+]] = internal unnamed_addr constant i64 8
Alexey Bataev09c9eea2018-11-09 16:18:04 +000033// CK1-DAG: [[KERNEL_SHARED:@.+]] = internal unnamed_addr constant i16 1
Gheorghe-Teodor Bercead3dcf2f2018-03-14 14:17:45 +000034
Gheorghe-Teodor Bercea36cdfad2018-03-22 17:33:27 +000035/// ========= In the worker function ========= ///
36// CK1: {{.*}}define internal void @__omp_offloading{{.*}}test_ds{{.*}}_worker()
37// CK1: call void @llvm.nvvm.barrier0()
Gheorghe-Teodor Berceaad4e5792018-07-13 16:18:24 +000038// CK1-NOT: call void @__kmpc_data_sharing_init_stack
Gheorghe-Teodor Bercea36cdfad2018-03-22 17:33:27 +000039
Gheorghe-Teodor Bercead3dcf2f2018-03-14 14:17:45 +000040/// ========= In the kernel function ========= ///
41
Alexey Bataev9a700172018-05-08 14:16:57 +000042// CK1: {{.*}}define weak void @__omp_offloading{{.*}}test_ds{{.*}}()
Gheorghe-Teodor Bercead3dcf2f2018-03-14 14:17:45 +000043// CK1: [[SHAREDARGS1:%.+]] = alloca i8**
44// CK1: [[SHAREDARGS2:%.+]] = alloca i8**
45// CK1: call void @__kmpc_kernel_init
46// CK1: call void @__kmpc_data_sharing_init_stack
Alexey Bataev09c9eea2018-11-09 16:18:04 +000047// CK1: [[SHARED_MEM_FLAG:%.+]] = load i16, i16* [[KERNEL_SHARED]],
Alexey Bataeve4090182018-11-02 14:54:07 +000048// CK1: [[SIZE:%.+]] = load i64, i64* [[KERNEL_SIZE]],
Alexey Bataev09c9eea2018-11-09 16:18:04 +000049// CK1: call void @__kmpc_get_team_static_memory(i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds ([[MEM_TY]], [[MEM_TY]] addrspace(3)* [[SHARED_GLOBAL_RD]], i32 0, i32 0, i32 0) to i8*), i64 [[SIZE]], i16 [[SHARED_MEM_FLAG]], i8** addrspacecast (i8* addrspace(3)* [[KERNEL_PTR]] to i8**))
Alexey Bataeve4090182018-11-02 14:54:07 +000050// CK1: [[KERNEL_RD:%.+]] = load i8*, i8* addrspace(3)* [[KERNEL_PTR]],
51// CK1: [[GLOBALSTACK:%.+]] = getelementptr inbounds i8, i8* [[KERNEL_RD]], i64 0
Gheorghe-Teodor Bercead3dcf2f2018-03-14 14:17:45 +000052// CK1: [[GLOBALSTACK2:%.+]] = bitcast i8* [[GLOBALSTACK]] to %struct._globalized_locals_ty*
Alexey Bataev4ac58d12018-10-12 20:19:59 +000053// CK1: [[A:%.+]] = getelementptr inbounds %struct._globalized_locals_ty, %struct._globalized_locals_ty* [[GLOBALSTACK2]], i32 0, i32 0
54// CK1: [[B:%.+]] = getelementptr inbounds %struct._globalized_locals_ty, %struct._globalized_locals_ty* [[GLOBALSTACK2]], i32 0, i32 1
Gheorghe-Teodor Bercead3dcf2f2018-03-14 14:17:45 +000055// CK1: store i32 10, i32* [[A]]
56// CK1: call void @__kmpc_kernel_prepare_parallel({{.*}}, i16 1)
57// CK1: call void @__kmpc_begin_sharing_variables(i8*** [[SHAREDARGS1]], i64 1)
58// CK1: [[SHARGSTMP1:%.+]] = load i8**, i8*** [[SHAREDARGS1]]
59// CK1: [[SHARGSTMP2:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP1]], i64 0
60// CK1: [[SHAREDVAR:%.+]] = bitcast i32* [[A]] to i8*
61// CK1: store i8* [[SHAREDVAR]], i8** [[SHARGSTMP2]]
62// CK1: call void @llvm.nvvm.barrier0()
63// CK1: call void @llvm.nvvm.barrier0()
64// CK1: call void @__kmpc_end_sharing_variables()
65// CK1: store i32 100, i32* [[B]]
66// CK1: call void @__kmpc_kernel_prepare_parallel({{.*}}, i16 1)
67// CK1: call void @__kmpc_begin_sharing_variables(i8*** [[SHAREDARGS2]], i64 2)
68// CK1: [[SHARGSTMP3:%.+]] = load i8**, i8*** [[SHAREDARGS2]]
69// CK1: [[SHARGSTMP4:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP3]], i64 0
70// CK1: [[SHAREDVAR1:%.+]] = bitcast i32* [[B]] to i8*
71// CK1: store i8* [[SHAREDVAR1]], i8** [[SHARGSTMP4]]
72// CK1: [[SHARGSTMP12:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP3]], i64 1
73// CK1: [[SHAREDVAR2:%.+]] = bitcast i32* [[A]] to i8*
74// CK1: store i8* [[SHAREDVAR2]], i8** [[SHARGSTMP12]]
75// CK1: call void @llvm.nvvm.barrier0()
76// CK1: call void @llvm.nvvm.barrier0()
77// CK1: call void @__kmpc_end_sharing_variables()
Alexey Bataev09c9eea2018-11-09 16:18:04 +000078// CK1: [[SHARED_MEM_FLAG:%.+]] = load i16, i16* [[KERNEL_SHARED]],
79// CK1: call void @__kmpc_restore_team_static_memory(i16 [[SHARED_MEM_FLAG]])
Gheorghe-Teodor Bercead3dcf2f2018-03-14 14:17:45 +000080// CK1: call void @__kmpc_kernel_deinit(i16 1)
81
82/// ========= In the data sharing wrapper function ========= ///
83
84// CK1: {{.*}}define internal void @__omp_outlined{{.*}}wrapper({{.*}})
85// CK1: [[SHAREDARGS4:%.+]] = alloca i8**
86// CK1: call void @__kmpc_get_shared_variables(i8*** [[SHAREDARGS4]])
87// CK1: [[SHARGSTMP13:%.+]] = load i8**, i8*** [[SHAREDARGS4]]
88// CK1: [[SHARGSTMP14:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP13]], i64 0
89// CK1: [[SHARGSTMP15:%.+]] = bitcast i8** [[SHARGSTMP14]] to i32**
90// CK1: [[SHARGSTMP16:%.+]] = load i32*, i32** [[SHARGSTMP15]]
91// CK1: call void @__omp_outlined__{{.*}}({{.*}}, i32* [[SHARGSTMP16]])
92
Alexey Bataev63cc8e92018-03-20 14:45:59 +000093/// outlined function for the second parallel region ///
94
95// CK1: define internal void @{{.+}}(i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable{{.+}}, i32* dereferenceable{{.+}})
Alexey Bataevb99dcb52018-07-09 17:43:58 +000096// CK1-NOT: call i8* @__kmpc_data_sharing_push_stack(
97// CK1: [[C_ADDR:%.+]] = alloca i32,
Alexey Bataev63cc8e92018-03-20 14:45:59 +000098// CK1: store i32* [[C_ADDR]], i32** %
// CK1-NOT: call void @__kmpc_data_sharing_pop_stack(
Alexey Bataev63cc8e92018-03-20 14:45:59 +0000100
Gheorghe-Teodor Bercead3dcf2f2018-03-14 14:17:45 +0000101/// ========= In the data sharing wrapper function ========= ///
102
103// CK1: {{.*}}define internal void @__omp_outlined{{.*}}wrapper({{.*}})
104// CK1: [[SHAREDARGS3:%.+]] = alloca i8**
105// CK1: call void @__kmpc_get_shared_variables(i8*** [[SHAREDARGS3]])
106// CK1: [[SHARGSTMP5:%.+]] = load i8**, i8*** [[SHAREDARGS3]]
107// CK1: [[SHARGSTMP6:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP5]], i64 0
108// CK1: [[SHARGSTMP7:%.+]] = bitcast i8** [[SHARGSTMP6]] to i32**
109// CK1: [[SHARGSTMP8:%.+]] = load i32*, i32** [[SHARGSTMP7]]
110// CK1: [[SHARGSTMP9:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP5]], i64 1
111// CK1: [[SHARGSTMP10:%.+]] = bitcast i8** [[SHARGSTMP9]] to i32**
112// CK1: [[SHARGSTMP11:%.+]] = load i32*, i32** [[SHARGSTMP10]]
113// CK1: call void @__omp_outlined__{{.*}}({{.*}}, i32* [[SHARGSTMP8]], i32* [[SHARGSTMP11]])
114
115#endif
116