// Test target codegen - the host bc file has to be created first.
2 | // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc |
3 | // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 |
4 | // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc |
5 | // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 |
6 | // RUN: %clang_cc1 -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 |
7 | // expected-no-diagnostics |
8 | #ifndef HEADER |
9 | #define HEADER |
10 | |
11 | // Check that the execution mode of all 7 target regions is set to Generic Mode. |
12 | // CHECK-DAG: [[NONSPMD:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds |
13 | // CHECK-DAG: [[UNKNOWN:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, i8* getelementptr inbounds |
14 | // CHECK-DAG: {{@__omp_offloading_.+l59}}_exec_mode = weak constant i8 1 |
15 | // CHECK-DAG: {{@__omp_offloading_.+l137}}_exec_mode = weak constant i8 1 |
16 | // CHECK-DAG: {{@__omp_offloading_.+l214}}_exec_mode = weak constant i8 1 |
// CHECK-DAG: {{@__omp_offloading_.+l324}}_exec_mode = weak constant i8 1
// CHECK-DAG: {{@__omp_offloading_.+l345}}_exec_mode = weak constant i8 1
// CHECK-DAG: {{@__omp_offloading_.+l362}}_exec_mode = weak constant i8 1
// CHECK-DAG: {{@__omp_offloading_.+l380}}_exec_mode = weak constant i8 1
21 | // CHECK-DAG: [[MAP_TY:%.+]] = type { [128 x i8] } |
22 | // CHECK-DAG: [[GLOB_TY:%.+]] = type { i32* } |
23 | |
24 | __thread int id; |
25 | |
26 | int baz(int f, double &a); |
27 | |
28 | template<typename tx, typename ty> |
29 | struct TT{ |
30 | tx X; |
31 | ty Y; |
32 | tx &operator[](int i) { return X; } |
33 | }; |
34 | |
35 | // CHECK: define weak void @__omp_offloading_{{.+}}_{{.+}}targetBar{{.+}}_l59(i32* [[PTR1:%.+]], i32** dereferenceable{{.*}} [[PTR2_REF:%.+]]) |
36 | // CHECK: store i32* [[PTR1]], i32** [[PTR1_ADDR:%.+]], |
37 | // CHECK: store i32** [[PTR2_REF]], i32*** [[PTR2_REF_PTR:%.+]], |
38 | // CHECK: [[PTR2_REF:%.+]] = load i32**, i32*** [[PTR2_REF_PTR]], |
39 | // CHECK: call void @__kmpc_kernel_init( |
40 | // CHECK: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds ([[MAP_TY]], [[MAP_TY]] addrspace(3)* @{{.+}}, i32 0, i32 0, i32 0) to i8*), i{{64|32}} %{{.+}}, i16 %{{.+}}, i8** addrspacecast (i8* addrspace(3)* [[BUF_PTR:@.+]] to i8**)) |
41 | // CHECK: [[BUF:%.+]] = load i8*, i8* addrspace(3)* [[BUF_PTR]], |
42 | // CHECK: [[BUF_OFFS:%.+]] = getelementptr inbounds i8, i8* [[BUF]], i{{[0-9]+}} 0 |
43 | // CHECK: [[BUF:%.+]] = bitcast i8* [[BUF_OFFS]] to [[GLOB_TY]]* |
44 | // CHECK: [[PTR1:%.+]] = load i32*, i32** [[PTR1_ADDR]], |
45 | // CHECK: [[PTR1_GLOB_REF:%.+]] = getelementptr inbounds [[GLOB_TY]], [[GLOB_TY]]* [[BUF]], i32 0, i32 0 |
46 | // CHECK: store i32* [[PTR1]], i32** [[PTR1_GLOB_REF]], |
47 | // CHECK: call void @__kmpc_begin_sharing_variables(i8*** [[ARG_PTRS_REF:%.+]], i{{64|32}} 2) |
48 | // CHECK: [[ARG_PTRS:%.+]] = load i8**, i8*** [[ARG_PTRS_REF]], |
49 | // CHECK: [[ARG_PTR1:%.+]] = getelementptr inbounds i8*, i8** [[ARG_PTRS]], i{{[0-9]+}} 0 |
50 | // CHECK: [[BC:%.+]] = bitcast i32** [[PTR1_GLOB_REF]] to i8* |
51 | // CHECK: store i8* [[BC]], i8** [[ARG_PTR1]], |
52 | // CHECK: [[ARG_PTR2:%.+]] = getelementptr inbounds i8*, i8** [[ARG_PTRS]], i{{[0-9]+}} 1 |
53 | // CHECK: [[BC:%.+]] = bitcast i32** [[PTR2_REF]] to i8* |
54 | // CHECK: store i8* [[BC]], i8** [[ARG_PTR2]], |
55 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
56 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
57 | // CHECK: call void @__kmpc_end_sharing_variables() |
58 | void targetBar(int *Ptr1, int *Ptr2) { |
59 | #pragma omp target map(Ptr1[:0], Ptr2) |
60 | #pragma omp parallel num_threads(2) |
61 | *Ptr1 = *Ptr2; |
62 | } |
63 | |
64 | int foo(int n) { |
65 | int a = 0; |
66 | short aa = 0; |
67 | float b[10]; |
68 | float bn[n]; |
69 | double c[5][10]; |
70 | double cn[5][n]; |
71 | TT<long long, char> d; |
72 | |
73 | // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l137}}_worker() |
74 | // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8, |
75 | // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*, |
76 | // CHECK: store i8* null, i8** [[OMP_WORK_FN]], |
77 | // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]], |
78 | // CHECK: br label {{%?}}[[AWAIT_WORK:.+]] |
79 | // |
80 | // CHECK: [[AWAIT_WORK]] |
81 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
82 | // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], |
83 | // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null |
84 | // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] |
85 | // |
86 | // CHECK: [[SEL_WORKERS]] |
87 | // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], |
88 | // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 |
89 | // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] |
90 | // |
91 | // CHECK: [[EXEC_PARALLEL]] |
92 | // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]] |
93 | // |
94 | // CHECK: [[TERM_PARALLEL]] |
95 | // CHECK: br label {{%?}}[[BAR_PARALLEL]] |
96 | // |
97 | // CHECK: [[BAR_PARALLEL]] |
98 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
99 | // CHECK: br label {{%?}}[[AWAIT_WORK]] |
100 | // |
101 | // CHECK: [[EXIT]] |
102 | // CHECK: ret void |
103 | |
104 | // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l137]]() |
105 | // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
106 | // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
107 | // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
108 | // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]] |
109 | // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] |
110 | // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] |
111 | // |
112 | // CHECK: [[WORKER]] |
113 | // CHECK: {{call|invoke}} void [[T1]]_worker() |
114 | // CHECK: br label {{%?}}[[EXIT:.+]] |
115 | // |
116 | // CHECK: [[CHECK_MASTER]] |
117 | // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
118 | // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
119 | // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
120 | // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], |
121 | // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] |
122 | // |
123 | // CHECK: [[MASTER]] |
124 | // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
125 | // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
126 | // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]] |
127 | // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] |
128 | // CHECK: br label {{%?}}[[TERMINATE:.+]] |
129 | // |
130 | // CHECK: [[TERMINATE]] |
131 | // CHECK: call void @__kmpc_kernel_deinit( |
132 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
133 | // CHECK: br label {{%?}}[[EXIT]] |
134 | // |
135 | // CHECK: [[EXIT]] |
136 | // CHECK: ret void |
137 | #pragma omp target |
138 | { |
139 | } |
140 | |
// CHECK-NOT: define {{.*}}void {{@__omp_offloading_.+foo.+}}_worker()
142 | #pragma omp target if(0) |
143 | { |
144 | } |
145 | |
146 | // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l214}}_worker() |
147 | // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8, |
148 | // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*, |
149 | // CHECK: store i8* null, i8** [[OMP_WORK_FN]], |
150 | // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]], |
151 | // CHECK: br label {{%?}}[[AWAIT_WORK:.+]] |
152 | // |
153 | // CHECK: [[AWAIT_WORK]] |
154 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
155 | // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], |
156 | // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null |
157 | // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] |
158 | // |
159 | // CHECK: [[SEL_WORKERS]] |
160 | // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], |
161 | // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 |
162 | // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] |
163 | // |
164 | // CHECK: [[EXEC_PARALLEL]] |
165 | // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]] |
166 | // |
167 | // CHECK: [[TERM_PARALLEL]] |
168 | // CHECK: br label {{%?}}[[BAR_PARALLEL]] |
169 | // |
170 | // CHECK: [[BAR_PARALLEL]] |
171 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
172 | // CHECK: br label {{%?}}[[AWAIT_WORK]] |
173 | // |
174 | // CHECK: [[EXIT]] |
175 | // CHECK: ret void |
176 | |
// CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l214]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]], i[[SZ]] [[ID:%[a-zA-Z_]+]])
178 | // CHECK: [[AA_ADDR:%.+]] = alloca i[[SZ]], |
179 | // CHECK: store i[[SZ]] [[ARG1]], i[[SZ]]* [[AA_ADDR]], |
180 | // CHECK: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16* |
181 | // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
182 | // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
183 | // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
184 | // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]] |
185 | // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] |
186 | // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] |
187 | // |
188 | // CHECK: [[WORKER]] |
189 | // CHECK: {{call|invoke}} void [[T2]]_worker() |
190 | // CHECK: br label {{%?}}[[EXIT:.+]] |
191 | // |
192 | // CHECK: [[CHECK_MASTER]] |
193 | // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
194 | // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
195 | // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
196 | // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], |
197 | // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] |
198 | // |
199 | // CHECK: [[MASTER]] |
200 | // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
201 | // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
202 | // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]] |
203 | // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] |
204 | // CHECK: load i16, i16* [[AA_CADDR]], |
205 | // CHECK: br label {{%?}}[[TERMINATE:.+]] |
206 | // |
207 | // CHECK: [[TERMINATE]] |
208 | // CHECK: call void @__kmpc_kernel_deinit( |
209 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
210 | // CHECK: br label {{%?}}[[EXIT]] |
211 | // |
212 | // CHECK: [[EXIT]] |
213 | // CHECK: ret void |
214 | #pragma omp target if(1) |
215 | { |
216 | aa += 1; |
217 | id = aa; |
218 | } |
219 | |
220 | // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l324}}_worker() |
221 | // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8, |
222 | // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*, |
223 | // CHECK: store i8* null, i8** [[OMP_WORK_FN]], |
224 | // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]], |
225 | // CHECK: br label {{%?}}[[AWAIT_WORK:.+]] |
226 | // |
227 | // CHECK: [[AWAIT_WORK]] |
228 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
229 | // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], |
230 | // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null |
231 | // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] |
232 | // |
233 | // CHECK: [[SEL_WORKERS]] |
234 | // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], |
235 | // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 |
236 | // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] |
237 | // |
238 | // CHECK: [[EXEC_PARALLEL]] |
239 | // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]] |
240 | // |
241 | // CHECK: [[TERM_PARALLEL]] |
242 | // CHECK: br label {{%?}}[[BAR_PARALLEL]] |
243 | // |
244 | // CHECK: [[BAR_PARALLEL]] |
245 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
246 | // CHECK: br label {{%?}}[[AWAIT_WORK]] |
247 | // |
248 | // CHECK: [[EXIT]] |
249 | // CHECK: ret void |
250 | |
251 | // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l324]](i[[SZ]] |
252 | // Create local storage for each capture. |
253 | // CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]] |
254 | // CHECK: [[LOCAL_B:%.+]] = alloca [10 x float]* |
255 | // CHECK: [[LOCAL_VLA1:%.+]] = alloca i[[SZ]] |
256 | // CHECK: [[LOCAL_BN:%.+]] = alloca float* |
257 | // CHECK: [[LOCAL_C:%.+]] = alloca [5 x [10 x double]]* |
258 | // CHECK: [[LOCAL_VLA2:%.+]] = alloca i[[SZ]] |
259 | // CHECK: [[LOCAL_VLA3:%.+]] = alloca i[[SZ]] |
260 | // CHECK: [[LOCAL_CN:%.+]] = alloca double* |
261 | // CHECK: [[LOCAL_D:%.+]] = alloca [[TT:%.+]]* |
262 | // CHECK-DAG: store i[[SZ]] [[ARG_A:%.+]], i[[SZ]]* [[LOCAL_A]] |
263 | // CHECK-DAG: store [10 x float]* [[ARG_B:%.+]], [10 x float]** [[LOCAL_B]] |
264 | // CHECK-DAG: store i[[SZ]] [[ARG_VLA1:%.+]], i[[SZ]]* [[LOCAL_VLA1]] |
265 | // CHECK-DAG: store float* [[ARG_BN:%.+]], float** [[LOCAL_BN]] |
266 | // CHECK-DAG: store [5 x [10 x double]]* [[ARG_C:%.+]], [5 x [10 x double]]** [[LOCAL_C]] |
267 | // CHECK-DAG: store i[[SZ]] [[ARG_VLA2:%.+]], i[[SZ]]* [[LOCAL_VLA2]] |
268 | // CHECK-DAG: store i[[SZ]] [[ARG_VLA3:%.+]], i[[SZ]]* [[LOCAL_VLA3]] |
269 | // CHECK-DAG: store double* [[ARG_CN:%.+]], double** [[LOCAL_CN]] |
270 | // CHECK-DAG: store [[TT]]* [[ARG_D:%.+]], [[TT]]** [[LOCAL_D]] |
271 | // |
272 | // CHECK-64-DAG: [[REF_A:%.+]] = bitcast i64* [[LOCAL_A]] to i32* |
273 | // CHECK-DAG: [[REF_B:%.+]] = load [10 x float]*, [10 x float]** [[LOCAL_B]], |
274 | // CHECK-DAG: [[VAL_VLA1:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA1]], |
275 | // CHECK-DAG: [[REF_BN:%.+]] = load float*, float** [[LOCAL_BN]], |
276 | // CHECK-DAG: [[REF_C:%.+]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[LOCAL_C]], |
277 | // CHECK-DAG: [[VAL_VLA2:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA2]], |
278 | // CHECK-DAG: [[VAL_VLA3:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA3]], |
279 | // CHECK-DAG: [[REF_CN:%.+]] = load double*, double** [[LOCAL_CN]], |
280 | // CHECK-DAG: [[REF_D:%.+]] = load [[TT]]*, [[TT]]** [[LOCAL_D]], |
281 | // |
282 | // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
283 | // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
284 | // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
285 | // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]] |
286 | // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] |
287 | // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] |
288 | // |
289 | // CHECK: [[WORKER]] |
290 | // CHECK: {{call|invoke}} void [[T3]]_worker() |
291 | // CHECK: br label {{%?}}[[EXIT:.+]] |
292 | // |
293 | // CHECK: [[CHECK_MASTER]] |
294 | // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
295 | // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
296 | // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
297 | // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], |
298 | // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] |
299 | // |
300 | // CHECK: [[MASTER]] |
301 | // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
302 | // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
303 | // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]] |
304 | // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] |
305 | // |
306 | // Use captures. |
307 | // CHECK-64-DAG: load i32, i32* [[REF_A]] |
308 | // CHECK-32-DAG: load i32, i32* [[LOCAL_A]] |
309 | // CHECK-DAG: getelementptr inbounds [10 x float], [10 x float]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2 |
310 | // CHECK-DAG: getelementptr inbounds float, float* [[REF_BN]], i[[SZ]] 3 |
311 | // CHECK-DAG: getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[REF_C]], i[[SZ]] 0, i[[SZ]] 1 |
312 | // CHECK-DAG: getelementptr inbounds double, double* [[REF_CN]], i[[SZ]] %{{.+}} |
313 | // CHECK-DAG: getelementptr inbounds [[TT]], [[TT]]* [[REF_D]], i32 0, i32 0 |
314 | // |
315 | // CHECK: br label {{%?}}[[TERMINATE:.+]] |
316 | // |
317 | // CHECK: [[TERMINATE]] |
318 | // CHECK: call void @__kmpc_kernel_deinit( |
319 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
320 | // CHECK: br label {{%?}}[[EXIT]] |
321 | // |
322 | // CHECK: [[EXIT]] |
323 | // CHECK: ret void |
324 | #pragma omp target if(n>20) |
325 | { |
326 | a += 1; |
327 | b[2] += 1.0; |
328 | bn[3] += 1.0; |
329 | c[1][2] += 1.0; |
330 | cn[1][3] += 1.0; |
331 | d.X += 1; |
332 | d.Y += 1; |
333 | d[0] += 1; |
334 | } |
335 | |
336 | return a; |
337 | } |
338 | |
339 | template<typename tx> |
340 | tx ftemplate(int n) { |
341 | tx a = 0; |
342 | short aa = 0; |
343 | tx b[10]; |
344 | |
345 | #pragma omp target if(n>40) |
346 | { |
347 | a += 1; |
348 | aa += 1; |
349 | b[2] += 1; |
350 | } |
351 | |
352 | return a; |
353 | } |
354 | |
355 | static |
356 | int fstatic(int n) { |
357 | int a = 0; |
358 | short aa = 0; |
359 | char aaa = 0; |
360 | int b[10]; |
361 | |
362 | #pragma omp target if(n>50) |
363 | { |
364 | a += 1; |
365 | aa += 1; |
366 | aaa += 1; |
367 | b[2] += 1; |
368 | } |
369 | |
370 | return a; |
371 | } |
372 | |
373 | struct S1 { |
374 | double a; |
375 | |
376 | int r1(int n){ |
377 | int b = n+1; |
378 | short int c[2][n]; |
379 | |
380 | #pragma omp target if(n>60) |
381 | { |
382 | this->a = (double)b + 1.5; |
383 | c[1][1] = ++a; |
384 | baz(a, a); |
385 | } |
386 | |
387 | return c[1][1] + (int)b; |
388 | } |
389 | }; |
390 | |
391 | int bar(int n){ |
392 | int a = 0; |
393 | |
394 | a += foo(n); |
395 | |
396 | S1 S; |
397 | a += S.r1(n); |
398 | |
399 | a += fstatic(n); |
400 | |
401 | a += ftemplate<int>(n); |
402 | |
403 | return a; |
404 | } |
405 | |
406 | int baz(int f, double &a) { |
407 | #pragma omp parallel |
408 | f = 2 + a; |
409 | return f; |
410 | } |
411 | |
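// The remaining kernels, for fstatic (target at line 362), S1::r1 (line 380)
// and ftemplate<int> (line 345), are checked below and follow the same
// generic-mode pattern as the ones above. Each _worker function implements
// roughly the following loop (an illustrative pseudo-C sketch, not the
// emitted IR):
//
//   void worker() {
//     for (;;) {
//       __kmpc_barrier_simple_spmd(null, 0);  // wait for work from the master
//       if (work_fn == null) return;          // null work function: kernel done
//       if (active) work_fn(...);             // only active workers execute it
//       __kmpc_barrier_simple_spmd(null, 0);  // rejoin before the next round
//     }
//   }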
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+l362}}_worker()
413 | // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8, |
414 | // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*, |
415 | // CHECK: store i8* null, i8** [[OMP_WORK_FN]], |
416 | // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]], |
417 | // CHECK: br label {{%?}}[[AWAIT_WORK:.+]] |
418 | // |
419 | // CHECK: [[AWAIT_WORK]] |
420 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
421 | // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], |
422 | // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null |
423 | // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] |
424 | // |
425 | // CHECK: [[SEL_WORKERS]] |
426 | // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], |
427 | // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 |
428 | // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] |
429 | // |
430 | // CHECK: [[EXEC_PARALLEL]] |
431 | // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]] |
432 | // |
433 | // CHECK: [[TERM_PARALLEL]] |
434 | // CHECK: br label {{%?}}[[BAR_PARALLEL]] |
435 | // |
436 | // CHECK: [[BAR_PARALLEL]] |
437 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
438 | // CHECK: br label {{%?}}[[AWAIT_WORK]] |
439 | // |
440 | // CHECK: [[EXIT]] |
441 | // CHECK: ret void |
442 | |
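// The fstatic kernel receives its scalar captures (a, aa, aaa) as
// pointer-sized integer arguments and the array b as a pointer; the checks
// below verify that each capture gets a local slot and is converted back to
// its original type before use.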
443 | // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l362]](i[[SZ]] |
444 | // Create local storage for each capture. |
445 | // CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]] |
446 | // CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]] |
447 | // CHECK: [[LOCAL_AAA:%.+]] = alloca i[[SZ]] |
448 | // CHECK: [[LOCAL_B:%.+]] = alloca [10 x i32]* |
449 | // CHECK-DAG: store i[[SZ]] [[ARG_A:%.+]], i[[SZ]]* [[LOCAL_A]] |
450 | // CHECK-DAG: store i[[SZ]] [[ARG_AA:%.+]], i[[SZ]]* [[LOCAL_AA]] |
451 | // CHECK-DAG: store i[[SZ]] [[ARG_AAA:%.+]], i[[SZ]]* [[LOCAL_AAA]] |
452 | // CHECK-DAG: store [10 x i32]* [[ARG_B:%.+]], [10 x i32]** [[LOCAL_B]] |
// Load the captures and convert the by-value ones back to their original types.
454 | // CHECK-64-DAG: [[REF_A:%.+]] = bitcast i[[SZ]]* [[LOCAL_A]] to i32* |
455 | // CHECK-DAG: [[REF_AA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AA]] to i16* |
456 | // CHECK-DAG: [[REF_AAA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AAA]] to i8* |
457 | // CHECK-DAG: [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]], |
458 | // |
459 | // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
460 | // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
461 | // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
462 | // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]] |
463 | // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] |
464 | // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] |
465 | // |
466 | // CHECK: [[WORKER]] |
467 | // CHECK: {{call|invoke}} void [[T4]]_worker() |
468 | // CHECK: br label {{%?}}[[EXIT:.+]] |
469 | // |
470 | // CHECK: [[CHECK_MASTER]] |
471 | // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
472 | // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
473 | // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
474 | // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], |
475 | // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] |
476 | // |
477 | // CHECK: [[MASTER]] |
478 | // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
479 | // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
480 | // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]] |
481 | // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] |
482 | // CHECK-64-DAG: load i32, i32* [[REF_A]] |
483 | // CHECK-32-DAG: load i32, i32* [[LOCAL_A]] |
484 | // CHECK-DAG: load i16, i16* [[REF_AA]] |
485 | // CHECK-DAG: getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2 |
486 | // CHECK: br label {{%?}}[[TERMINATE:.+]] |
487 | // |
488 | // CHECK: [[TERMINATE]] |
489 | // CHECK: call void @__kmpc_kernel_deinit( |
490 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
491 | // CHECK: br label {{%?}}[[EXIT]] |
492 | // |
493 | // CHECK: [[EXIT]] |
494 | // CHECK: ret void |
495 | |
496 | |
497 | |
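// Unlike the empty worker loops above, the worker for the S1::r1 kernel
// executes real parallel work: the target region at line 380 calls baz,
// which contains a parallel region, so the EXEC_PARALLEL block below loads
// the published work function and calls it.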
498 | // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l380}}_worker() |
499 | // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8, |
500 | // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*, |
501 | // CHECK: store i8* null, i8** [[OMP_WORK_FN]], |
502 | // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]], |
503 | // CHECK: br label {{%?}}[[AWAIT_WORK:.+]] |
504 | // |
505 | // CHECK: [[AWAIT_WORK]] |
506 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
507 | // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], |
508 | // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null |
509 | // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] |
510 | // |
511 | // CHECK: [[SEL_WORKERS]] |
512 | // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], |
513 | // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 |
514 | // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] |
515 | // |
516 | // CHECK: [[EXEC_PARALLEL]] |
517 | // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[NONSPMD]] |
518 | // CHECK: [[WORK_FN:%.+]] = bitcast i8* [[WORK]] to void (i16, i32)* |
519 | // CHECK: call void [[WORK_FN]](i16 0, i32 [[GTID]]) |
520 | // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]] |
521 | // |
522 | // CHECK: [[TERM_PARALLEL]] |
523 | // CHECK: br label {{%?}}[[BAR_PARALLEL]] |
524 | // |
525 | // CHECK: [[BAR_PARALLEL]] |
526 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
527 | // CHECK: br label {{%?}}[[AWAIT_WORK]] |
528 | // |
529 | // CHECK: [[EXIT]] |
530 | // CHECK: ret void |
531 | |
532 | // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l380]]( |
533 | // Create local storage for each capture. |
534 | // CHECK: [[LOCAL_THIS:%.+]] = alloca [[S1:%struct.*]]* |
535 | // CHECK: [[LOCAL_B:%.+]] = alloca i[[SZ]] |
536 | // CHECK: [[LOCAL_VLA1:%.+]] = alloca i[[SZ]] |
537 | // CHECK: [[LOCAL_VLA2:%.+]] = alloca i[[SZ]] |
538 | // CHECK: [[LOCAL_C:%.+]] = alloca i16* |
539 | // CHECK-DAG: store [[S1]]* [[ARG_THIS:%.+]], [[S1]]** [[LOCAL_THIS]] |
540 | // CHECK-DAG: store i[[SZ]] [[ARG_B:%.+]], i[[SZ]]* [[LOCAL_B]] |
541 | // CHECK-DAG: store i[[SZ]] [[ARG_VLA1:%.+]], i[[SZ]]* [[LOCAL_VLA1]] |
542 | // CHECK-DAG: store i[[SZ]] [[ARG_VLA2:%.+]], i[[SZ]]* [[LOCAL_VLA2]] |
543 | // CHECK-DAG: store i16* [[ARG_C:%.+]], i16** [[LOCAL_C]] |
// Load the captures and convert the by-value ones back to their original types.
545 | // CHECK-DAG: [[REF_THIS:%.+]] = load [[S1]]*, [[S1]]** [[LOCAL_THIS]], |
// CHECK-64-DAG: [[REF_B:%.+]] = bitcast i[[SZ]]* [[LOCAL_B]] to i32*
547 | // CHECK-DAG: [[VAL_VLA1:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA1]], |
548 | // CHECK-DAG: [[VAL_VLA2:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA2]], |
549 | // CHECK-DAG: [[REF_C:%.+]] = load i16*, i16** [[LOCAL_C]], |
550 | // |
551 | // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
552 | // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
553 | // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
554 | // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]] |
555 | // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] |
556 | // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] |
557 | // |
558 | // CHECK: [[WORKER]] |
559 | // CHECK: {{call|invoke}} void [[T5]]_worker() |
560 | // CHECK: br label {{%?}}[[EXIT:.+]] |
561 | // |
562 | // CHECK: [[CHECK_MASTER]] |
563 | // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
564 | // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
565 | // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
566 | // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], |
567 | // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] |
568 | // |
569 | // CHECK: [[MASTER]] |
570 | // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
571 | // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
572 | // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]] |
573 | // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] |
574 | // Use captures. |
575 | // CHECK-DAG: getelementptr inbounds [[S1]], [[S1]]* [[REF_THIS]], i32 0, i32 0 |
// CHECK-64-DAG: load i32, i32* [[REF_B]]
// CHECK-32-DAG: load i32, i32* [[LOCAL_B]]
578 | // CHECK-DAG: getelementptr inbounds i16, i16* [[REF_C]], i[[SZ]] %{{.+}} |
579 | // CHECK: call i32 [[BAZ:@.*baz.*]](i32 % |
580 | // CHECK: br label {{%?}}[[TERMINATE:.+]] |
581 | // |
582 | // CHECK: [[TERMINATE]] |
583 | // CHECK: call void @__kmpc_kernel_deinit( |
584 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
585 | // CHECK: br label {{%?}}[[EXIT]] |
586 | // |
587 | // CHECK: [[EXIT]] |
588 | // CHECK: ret void |
589 | |
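// baz is called from a target region and contains a parallel region of its
// own, so it may execute in SPMD or non-SPMD mode and at any parallel level.
// The checks below verify that the emitted code queries
// __kmpc_is_spmd_exec_mode and __kmpc_parallel_level at run time and places
// the captured 'f' either on the private stack or in globalized storage
// obtained via __kmpc_data_sharing_coalesced_push_stack.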
590 | // CHECK: define i32 [[BAZ]](i32 [[F:%.*]], double* dereferenceable{{.*}}) |
591 | // CHECK: alloca i32, |
592 | // CHECK: [[LOCAL_F_PTR:%.+]] = alloca i32, |
593 | // CHECK: [[ZERO_ADDR:%.+]] = alloca i32, |
594 | // CHECK: store i32 0, i32* [[ZERO_ADDR]] |
595 | // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[UNKNOWN]] |
596 | // CHECK: [[PAR_LEVEL:%.+]] = call i16 @__kmpc_parallel_level(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]]) |
// CHECK: [[IS_TTD:%.+]] = icmp eq i16 [[PAR_LEVEL]], 0
598 | // CHECK: [[RES:%.+]] = call i8 @__kmpc_is_spmd_exec_mode() |
599 | // CHECK: [[IS_SPMD:%.+]] = icmp ne i8 [[RES]], 0 |
600 | // CHECK: br i1 [[IS_SPMD]], label |
601 | // CHECK: br label |
602 | // CHECK: [[SIZE:%.+]] = select i1 [[IS_TTD]], i{{64|32}} 4, i{{64|32}} 128 |
603 | // CHECK: [[PTR:%.+]] = call i8* @__kmpc_data_sharing_coalesced_push_stack(i{{64|32}} [[SIZE]], i16 0) |
604 | // CHECK: [[REC_ADDR:%.+]] = bitcast i8* [[PTR]] to [[GLOBAL_ST:%.+]]* |
605 | // CHECK: br label |
606 | // CHECK: [[ITEMS:%.+]] = phi [[GLOBAL_ST]]* [ null, {{.+}} ], [ [[REC_ADDR]], {{.+}} ] |
607 | // CHECK: [[TTD_ITEMS:%.+]] = bitcast [[GLOBAL_ST]]* [[ITEMS]] to [[SEC_GLOBAL_ST:%.+]]* |
608 | // CHECK: [[F_PTR_ARR:%.+]] = getelementptr inbounds [[GLOBAL_ST]], [[GLOBAL_ST]]* [[ITEMS]], i32 0, i32 0 |
609 | // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
610 | // CHECK: [[LID:%.+]] = and i32 [[TID]], 31 |
611 | // CHECK: [[GLOBAL_F_PTR_PAR:%.+]] = getelementptr inbounds [32 x i32], [32 x i32]* [[F_PTR_ARR]], i32 0, i32 [[LID]] |
612 | // CHECK: [[GLOBAL_F_PTR_TTD:%.+]] = getelementptr inbounds [[SEC_GLOBAL_ST]], [[SEC_GLOBAL_ST]]* [[TTD_ITEMS]], i32 0, i32 0 |
613 | // CHECK: [[GLOBAL_F_PTR:%.+]] = select i1 [[IS_TTD]], i32* [[GLOBAL_F_PTR_TTD]], i32* [[GLOBAL_F_PTR_PAR]] |
614 | // CHECK: [[F_PTR:%.+]] = select i1 [[IS_SPMD]], i32* [[LOCAL_F_PTR]], i32* [[GLOBAL_F_PTR]] |
615 | // CHECK: store i32 %{{.+}}, i32* [[F_PTR]], |
616 | |
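// The parallel directive inside baz is likewise guarded by run-time checks
// of the execution mode and the current parallel level; depending on the
// outcome the region is either serialized on the current thread or handed
// to the worker threads, as checked below.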
617 | // CHECK: [[RES:%.+]] = call i8 @__kmpc_is_spmd_exec_mode() |
618 | // CHECK: icmp ne i8 [[RES]], 0 |
619 | // CHECK: br i1 |
620 | |
621 | // CHECK: [[RES:%.+]] = call i16 @__kmpc_parallel_level(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]]) |
622 | // CHECK: icmp ne i16 [[RES]], 0 |
623 | // CHECK: br i1 |
624 | |
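// Serialized path: the outlined parallel function is called directly on the
// current thread.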
625 | // CHECK: call void @__kmpc_serialized_parallel(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]]) |
626 | // CHECK: call void [[OUTLINED:@.+]](i32* [[ZERO_ADDR]], i32* [[ZERO_ADDR]], i32* [[F_PTR]], double* %{{.+}}) |
627 | // CHECK: call void @__kmpc_end_serialized_parallel(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]]) |
628 | // CHECK: br label |
629 | |
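// Worker-dispatch path: the master publishes the outlined function and the
// address of 'f', then synchronizes with the workers.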
630 | // CHECK: call void @__kmpc_kernel_prepare_parallel(i8* bitcast (void (i16, i32)* @{{.+}} to i8*), i16 1) |
631 | // CHECK: call void @__kmpc_begin_sharing_variables(i8*** [[SHARED_PTR:%.+]], i{{64|32}} 2) |
632 | // CHECK: [[SHARED:%.+]] = load i8**, i8*** [[SHARED_PTR]], |
633 | // CHECK: [[REF:%.+]] = getelementptr inbounds i8*, i8** [[SHARED]], i{{64|32}} 0 |
634 | // CHECK: [[F_REF:%.+]] = bitcast i32* [[F_PTR]] to i8* |
635 | // CHECK: store i8* [[F_REF]], i8** [[REF]], |
636 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
637 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
638 | // CHECK: call void @__kmpc_end_sharing_variables() |
639 | // CHECK: br label |
640 | |
641 | // CHECK: [[RES:%.+]] = load i32, i32* [[F_PTR]], |
642 | // CHECK: store i32 [[RES]], i32* [[RET:%.+]], |
643 | // CHECK: br i1 [[IS_SPMD]], label |
644 | // CHECK: [[BC:%.+]] = bitcast [[GLOBAL_ST]]* [[ITEMS]] to i8* |
645 | // CHECK: call void @__kmpc_data_sharing_pop_stack(i8* [[BC]]) |
646 | // CHECK: br label |
647 | // CHECK: [[RES:%.+]] = load i32, i32* [[RET]], |
648 | // CHECK: ret i32 [[RES]] |
649 | |
650 | |
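// The ftemplate<int> kernel (target at line 345) and its worker repeat the
// same pattern for the template instantiation used from bar.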
651 | // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l345}}_worker() |
652 | // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8, |
653 | // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*, |
654 | // CHECK: store i8* null, i8** [[OMP_WORK_FN]], |
655 | // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]], |
656 | // CHECK: br label {{%?}}[[AWAIT_WORK:.+]] |
657 | // |
658 | // CHECK: [[AWAIT_WORK]] |
659 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
660 | // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], |
661 | // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null |
662 | // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] |
663 | // |
664 | // CHECK: [[SEL_WORKERS]] |
665 | // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], |
666 | // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 |
667 | // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] |
668 | // |
669 | // CHECK: [[EXEC_PARALLEL]] |
670 | // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]] |
671 | // |
672 | // CHECK: [[TERM_PARALLEL]] |
673 | // CHECK: br label {{%?}}[[BAR_PARALLEL]] |
674 | // |
675 | // CHECK: [[BAR_PARALLEL]] |
676 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
677 | // CHECK: br label {{%?}}[[AWAIT_WORK]] |
678 | // |
679 | // CHECK: [[EXIT]] |
680 | // CHECK: ret void |
681 | |
682 | // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l345]](i[[SZ]] |
683 | // Create local storage for each capture. |
684 | // CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]] |
685 | // CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]] |
686 | // CHECK: [[LOCAL_B:%.+]] = alloca [10 x i32]* |
687 | // CHECK-DAG: store i[[SZ]] [[ARG_A:%.+]], i[[SZ]]* [[LOCAL_A]] |
688 | // CHECK-DAG: store i[[SZ]] [[ARG_AA:%.+]], i[[SZ]]* [[LOCAL_AA]] |
689 | // CHECK-DAG: store [10 x i32]* [[ARG_B:%.+]], [10 x i32]** [[LOCAL_B]] |
// Load the captures and convert the by-value ones back to their original types.
// CHECK-64-DAG: [[REF_A:%.+]] = bitcast i[[SZ]]* [[LOCAL_A]] to i32*
692 | // CHECK-DAG: [[REF_AA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AA]] to i16* |
693 | // CHECK-DAG: [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]], |
694 | // |
695 | // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
696 | // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
697 | // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
698 | // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]] |
699 | // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] |
700 | // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] |
701 | // |
702 | // CHECK: [[WORKER]] |
703 | // CHECK: {{call|invoke}} void [[T6]]_worker() |
704 | // CHECK: br label {{%?}}[[EXIT:.+]] |
705 | // |
706 | // CHECK: [[CHECK_MASTER]] |
707 | // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() |
708 | // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
709 | // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
710 | // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], |
711 | // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] |
712 | // |
713 | // CHECK: [[MASTER]] |
714 | // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() |
715 | // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() |
716 | // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]] |
717 | // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] |
718 | // |
719 | // CHECK-64-DAG: load i32, i32* [[REF_A]] |
720 | // CHECK-32-DAG: load i32, i32* [[LOCAL_A]] |
721 | // CHECK-DAG: load i16, i16* [[REF_AA]] |
722 | // CHECK-DAG: getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2 |
723 | // |
724 | // CHECK: br label {{%?}}[[TERMINATE:.+]] |
725 | // |
726 | // CHECK: [[TERMINATE]] |
727 | // CHECK: call void @__kmpc_kernel_deinit( |
728 | // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) |
729 | // CHECK: br label {{%?}}[[EXIT]] |
730 | // |
731 | // CHECK: [[EXIT]] |
732 | // CHECK: ret void |
733 | |
734 | #endif |
735 | |