1 | // Test device global memory data sharing codegen. |
2 | ///==========================================================================/// |
3 | |
4 | // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc |
5 | // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CK1 |
6 | |
7 | // expected-no-diagnostics |
8 | |
9 | #ifndef HEADER |
10 | #define HEADER |
11 | |
// Exercise device-side globalization of stack variables: `a` and `b` escape
// into `omp parallel` regions, so (per the CHECK lines below) they must be
// placed in the team's statically allocated shared memory
// (_globalized_locals_ty via __kmpc_get_team_static_memory) instead of on
// the thread-private stack. `c` is private in the second parallel region, so
// no data-sharing push/pop is expected for it.
// NOTE: the constants and statement order here are matched verbatim by the
// FileCheck lines below — do not reformat or reorder.
void test_ds(){
  #pragma omp target
  {
    // `a` is written inside the first parallel region -> globalized.
    int a = 10;
    #pragma omp parallel
    {
      a = 1000;
    }
    // `b` is written inside the second parallel region -> globalized;
    // `c` is listed private(c), so each thread gets its own copy.
    int b = 100;
    int c = 1000;
    #pragma omp parallel private(c)
    {
      // Taking the address of the private `c` must still not force a
      // data-sharing stack push (checked by the CK1-NOT lines below).
      int *c1 = &c;
      b = a + 10000;
    }
  }
}
29 | // CK1: [[MEM_TY:%.+]] = type { [128 x i8] } |
30 | // CK1-DAG: [[SHARED_GLOBAL_RD:@.+]] = common addrspace(3) global [[MEM_TY]] zeroinitializer |
31 | // CK1-DAG: [[KERNEL_PTR:@.+]] = internal addrspace(3) global i8* null |
32 | // CK1-DAG: [[KERNEL_SIZE:@.+]] = internal unnamed_addr constant i64 8 |
33 | // CK1-DAG: [[KERNEL_SHARED:@.+]] = internal unnamed_addr constant i16 1 |
34 | |
35 | /// ========= In the worker function ========= /// |
36 | // CK1: {{.*}}define internal void @__omp_offloading{{.*}}test_ds{{.*}}_worker() |
37 | // CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
38 | // CK1-NOT: call void @__kmpc_data_sharing_init_stack |
39 | |
40 | /// ========= In the kernel function ========= /// |
41 | |
42 | // CK1: {{.*}}define weak void @__omp_offloading{{.*}}test_ds{{.*}}() |
43 | // CK1: [[SHAREDARGS1:%.+]] = alloca i8** |
44 | // CK1: [[SHAREDARGS2:%.+]] = alloca i8** |
45 | // CK1: call void @__kmpc_kernel_init |
46 | // CK1: call void @__kmpc_data_sharing_init_stack |
47 | // CK1: [[SHARED_MEM_FLAG:%.+]] = load i16, i16* [[KERNEL_SHARED]], |
48 | // CK1: [[SIZE:%.+]] = load i64, i64* [[KERNEL_SIZE]], |
49 | // CK1: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds ([[MEM_TY]], [[MEM_TY]] addrspace(3)* [[SHARED_GLOBAL_RD]], i32 0, i32 0, i32 0) to i8*), i64 [[SIZE]], i16 [[SHARED_MEM_FLAG]], i8** addrspacecast (i8* addrspace(3)* [[KERNEL_PTR]] to i8**)) |
50 | // CK1: [[KERNEL_RD:%.+]] = load i8*, i8* addrspace(3)* [[KERNEL_PTR]], |
51 | // CK1: [[GLOBALSTACK:%.+]] = getelementptr inbounds i8, i8* [[KERNEL_RD]], i64 0 |
52 | // CK1: [[GLOBALSTACK2:%.+]] = bitcast i8* [[GLOBALSTACK]] to %struct._globalized_locals_ty* |
53 | // CK1: [[A:%.+]] = getelementptr inbounds %struct._globalized_locals_ty, %struct._globalized_locals_ty* [[GLOBALSTACK2]], i32 0, i32 0 |
54 | // CK1: [[B:%.+]] = getelementptr inbounds %struct._globalized_locals_ty, %struct._globalized_locals_ty* [[GLOBALSTACK2]], i32 0, i32 1 |
55 | // CK1: store i32 10, i32* [[A]] |
56 | // CK1: call void @__kmpc_kernel_prepare_parallel({{.*}}, i16 1) |
57 | // CK1: call void @__kmpc_begin_sharing_variables(i8*** [[SHAREDARGS1]], i64 1) |
58 | // CK1: [[SHARGSTMP1:%.+]] = load i8**, i8*** [[SHAREDARGS1]] |
59 | // CK1: [[SHARGSTMP2:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP1]], i64 0 |
60 | // CK1: [[SHAREDVAR:%.+]] = bitcast i32* [[A]] to i8* |
61 | // CK1: store i8* [[SHAREDVAR]], i8** [[SHARGSTMP2]] |
62 | // CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
63 | // CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
64 | // CK1: call void @__kmpc_end_sharing_variables() |
65 | // CK1: store i32 100, i32* [[B]] |
66 | // CK1: call void @__kmpc_kernel_prepare_parallel({{.*}}, i16 1) |
67 | // CK1: call void @__kmpc_begin_sharing_variables(i8*** [[SHAREDARGS2]], i64 2) |
68 | // CK1: [[SHARGSTMP3:%.+]] = load i8**, i8*** [[SHAREDARGS2]] |
69 | // CK1: [[SHARGSTMP4:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP3]], i64 0 |
70 | // CK1: [[SHAREDVAR1:%.+]] = bitcast i32* [[B]] to i8* |
71 | // CK1: store i8* [[SHAREDVAR1]], i8** [[SHARGSTMP4]] |
72 | // CK1: [[SHARGSTMP12:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP3]], i64 1 |
73 | // CK1: [[SHAREDVAR2:%.+]] = bitcast i32* [[A]] to i8* |
74 | // CK1: store i8* [[SHAREDVAR2]], i8** [[SHARGSTMP12]] |
75 | // CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
76 | // CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
77 | // CK1: call void @__kmpc_end_sharing_variables() |
78 | // CK1: [[SHARED_MEM_FLAG:%.+]] = load i16, i16* [[KERNEL_SHARED]], |
79 | // CK1: call void @__kmpc_restore_team_static_memory(i16 0, i16 [[SHARED_MEM_FLAG]]) |
80 | // CK1: call void @__kmpc_kernel_deinit(i16 1) |
81 | |
/// ========= In the data sharing wrapper function for the first parallel region ========= ///
83 | |
84 | // CK1: {{.*}}define internal void @__omp_outlined{{.*}}wrapper({{.*}}) |
85 | // CK1: [[SHAREDARGS4:%.+]] = alloca i8** |
86 | // CK1: call void @__kmpc_get_shared_variables(i8*** [[SHAREDARGS4]]) |
87 | // CK1: [[SHARGSTMP13:%.+]] = load i8**, i8*** [[SHAREDARGS4]] |
88 | // CK1: [[SHARGSTMP14:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP13]], i64 0 |
89 | // CK1: [[SHARGSTMP15:%.+]] = bitcast i8** [[SHARGSTMP14]] to i32** |
90 | // CK1: [[SHARGSTMP16:%.+]] = load i32*, i32** [[SHARGSTMP15]] |
91 | // CK1: call void @__omp_outlined__{{.*}}({{.*}}, i32* [[SHARGSTMP16]]) |
92 | |
93 | /// outlined function for the second parallel region /// |
94 | |
95 | // CK1: define internal void @{{.+}}(i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable{{.+}}, i32* dereferenceable{{.+}}) |
96 | // CK1-NOT: call i8* @__kmpc_data_sharing_push_stack( |
97 | // CK1: [[C_ADDR:%.+]] = alloca i32, |
98 | // CK1: store i32* [[C_ADDR]], i32** % |
// CK1-NOT: call void @__kmpc_data_sharing_pop_stack(
100 | |
/// ========= In the data sharing wrapper function for the second parallel region ========= ///
102 | |
103 | // CK1: {{.*}}define internal void @__omp_outlined{{.*}}wrapper({{.*}}) |
104 | // CK1: [[SHAREDARGS3:%.+]] = alloca i8** |
105 | // CK1: call void @__kmpc_get_shared_variables(i8*** [[SHAREDARGS3]]) |
106 | // CK1: [[SHARGSTMP5:%.+]] = load i8**, i8*** [[SHAREDARGS3]] |
107 | // CK1: [[SHARGSTMP6:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP5]], i64 0 |
108 | // CK1: [[SHARGSTMP7:%.+]] = bitcast i8** [[SHARGSTMP6]] to i32** |
109 | // CK1: [[SHARGSTMP8:%.+]] = load i32*, i32** [[SHARGSTMP7]] |
110 | // CK1: [[SHARGSTMP9:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP5]], i64 1 |
111 | // CK1: [[SHARGSTMP10:%.+]] = bitcast i8** [[SHARGSTMP9]] to i32** |
112 | // CK1: [[SHARGSTMP11:%.+]] = load i32*, i32** [[SHARGSTMP10]] |
113 | // CK1: call void @__omp_outlined__{{.*}}({{.*}}, i32* [[SHARGSTMP8]], i32* [[SHARGSTMP11]]) |
114 | |
115 | #endif |
116 | |
117 | |