// Test device global memory data sharing codegen.
///==========================================================================///

// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CK1

// expected-no-diagnostics

#ifndef HEADER
#define HEADER

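// The locals 'a' and 'b' below escape into the enclosed parallel regions, so
// the NVPTX device codegen is expected to globalize them: rather than staying
// in thread-private storage, they are placed on the runtime's data sharing
// stack in device global memory, and their addresses are handed to the
// outlined parallel functions through the shared-variables machinery checked
// below.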
void test_ds(){
  #pragma omp target
  {
    int a = 10;
    #pragma omp parallel
    {
      a = 1000;
    }
    int b = 100;
    #pragma omp parallel
    {
      b = a + 10000;
    }
  }
}

/// ========= In the kernel function ========= ///

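// The target region is emitted as a kernel that initializes the runtime and
// the data sharing stack, pushes an 8-byte frame holding the globalized 'a'
// and 'b', and then, for each parallel region, publishes the addresses of the
// shared locals via __kmpc_begin_sharing_variables before releasing the
// workers: one pointer ('a') for the first region, two pointers ('b' and 'a')
// for the second. The frame is popped and the kernel deinitialized at the end.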
// CK1: {{.*}}define void @__omp_offloading{{.*}}test_ds{{.*}}()
// CK1: [[SHAREDARGS1:%.+]] = alloca i8**
// CK1: [[SHAREDARGS2:%.+]] = alloca i8**
// CK1: call void @__kmpc_kernel_init
// CK1: call void @__kmpc_data_sharing_init_stack
// CK1: [[GLOBALSTACK:%.+]] = call i8* @__kmpc_data_sharing_push_stack(i64 8, i16 0)
// CK1: [[GLOBALSTACK2:%.+]] = bitcast i8* [[GLOBALSTACK]] to %struct._globalized_locals_ty*
// CK1: [[A:%.+]] = getelementptr inbounds %struct._globalized_locals_ty, %struct._globalized_locals_ty* [[GLOBALSTACK2]], i32 0, i32 0
// CK1: [[B:%.+]] = getelementptr inbounds %struct._globalized_locals_ty, %struct._globalized_locals_ty* [[GLOBALSTACK2]], i32 0, i32 1
// CK1: store i32 10, i32* [[A]]
// CK1: call void @__kmpc_kernel_prepare_parallel({{.*}}, i16 1)
// CK1: call void @__kmpc_begin_sharing_variables(i8*** [[SHAREDARGS1]], i64 1)
// CK1: [[SHARGSTMP1:%.+]] = load i8**, i8*** [[SHAREDARGS1]]
// CK1: [[SHARGSTMP2:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP1]], i64 0
// CK1: [[SHAREDVAR:%.+]] = bitcast i32* [[A]] to i8*
// CK1: store i8* [[SHAREDVAR]], i8** [[SHARGSTMP2]]
// CK1: call void @llvm.nvvm.barrier0()
// CK1: call void @llvm.nvvm.barrier0()
// CK1: call void @__kmpc_end_sharing_variables()
// CK1: store i32 100, i32* [[B]]
// CK1: call void @__kmpc_kernel_prepare_parallel({{.*}}, i16 1)
// CK1: call void @__kmpc_begin_sharing_variables(i8*** [[SHAREDARGS2]], i64 2)
// CK1: [[SHARGSTMP3:%.+]] = load i8**, i8*** [[SHAREDARGS2]]
// CK1: [[SHARGSTMP4:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP3]], i64 0
// CK1: [[SHAREDVAR1:%.+]] = bitcast i32* [[B]] to i8*
// CK1: store i8* [[SHAREDVAR1]], i8** [[SHARGSTMP4]]
// CK1: [[SHARGSTMP12:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP3]], i64 1
// CK1: [[SHAREDVAR2:%.+]] = bitcast i32* [[A]] to i8*
// CK1: store i8* [[SHAREDVAR2]], i8** [[SHARGSTMP12]]
// CK1: call void @llvm.nvvm.barrier0()
// CK1: call void @llvm.nvvm.barrier0()
// CK1: call void @__kmpc_end_sharing_variables()
// CK1: call void @__kmpc_data_sharing_pop_stack(i8* [[GLOBALSTACK]])
// CK1: call void @__kmpc_kernel_deinit(i16 1)

/// ========= In the data sharing wrapper function ========= ///

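// This wrapper corresponds to the parallel region that captures a single
// variable: it retrieves the shared-arguments array from the runtime, loads
// the one i32* stored there, and forwards it to the outlined parallel body.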
// CK1: {{.*}}define internal void @__omp_outlined{{.*}}wrapper({{.*}})
// CK1: [[SHAREDARGS4:%.+]] = alloca i8**
// CK1: call void @__kmpc_get_shared_variables(i8*** [[SHAREDARGS4]])
// CK1: [[SHARGSTMP13:%.+]] = load i8**, i8*** [[SHAREDARGS4]]
// CK1: [[SHARGSTMP14:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP13]], i64 0
// CK1: [[SHARGSTMP15:%.+]] = bitcast i8** [[SHARGSTMP14]] to i32**
// CK1: [[SHARGSTMP16:%.+]] = load i32*, i32** [[SHARGSTMP15]]
// CK1: call void @__omp_outlined__{{.*}}({{.*}}, i32* [[SHARGSTMP16]])

/// ========= In the data sharing wrapper function ========= ///

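// This wrapper corresponds to the parallel region that captures two
// variables: it loads both shared i32* entries from the arguments array and
// passes them on to the outlined parallel body.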
// CK1: {{.*}}define internal void @__omp_outlined{{.*}}wrapper({{.*}})
// CK1: [[SHAREDARGS3:%.+]] = alloca i8**
// CK1: call void @__kmpc_get_shared_variables(i8*** [[SHAREDARGS3]])
// CK1: [[SHARGSTMP5:%.+]] = load i8**, i8*** [[SHAREDARGS3]]
// CK1: [[SHARGSTMP6:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP5]], i64 0
// CK1: [[SHARGSTMP7:%.+]] = bitcast i8** [[SHARGSTMP6]] to i32**
// CK1: [[SHARGSTMP8:%.+]] = load i32*, i32** [[SHARGSTMP7]]
// CK1: [[SHARGSTMP9:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP5]], i64 1
// CK1: [[SHARGSTMP10:%.+]] = bitcast i8** [[SHARGSTMP9]] to i32**
// CK1: [[SHARGSTMP11:%.+]] = load i32*, i32** [[SHARGSTMP10]]
// CK1: call void @__omp_outlined__{{.*}}({{.*}}, i32* [[SHARGSTMP8]], i32* [[SHARGSTMP11]])

#endif