// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=TERM_DEBUG
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -O1 -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CLEANUP

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -O1 -fopenmp-simd -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

// CHECK-DAG: [[IDENT_T_TY:%.+]] = type { i32, i32, i32, i32, i8* }
// CHECK-DAG: [[LOOP_LOC:@.+]] = private unnamed_addr global [[IDENT_T_TY]] { i32 0, i32 514, i32 0, i32 0, i8*
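// Note: the schedule-kind argument seen in the __kmpc_for_static_init_* and
// __kmpc_dispatch_init_* calls below follows the runtime's sched_type encoding:
// 33 = static chunked, 34 = static, 35 = dynamic chunked, 36 = guided chunked,
// 37 = runtime, 38 = auto.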

// CHECK-LABEL: with_var_schedule
void with_var_schedule() {
  double a = 5;
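// The chunk expression char(a) is evaluated in the caller: the double is converted to i8
// (fptosi), captured, and forwarded to the outlined function, which sign-extends it to the
// 64-bit iteration type before calling the static-init runtime entry.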
// CHECK: [[CHUNK_SIZE:%.+]] = fptosi double %{{.+}}to i8
// CHECK: store i8 %{{.+}}, i8* [[CHUNK:%.+]],
// CHECK: [[VAL:%.+]] = load i8, i8* [[CHUNK]],
// CHECK: store i8 [[VAL]], i8*
// CHECK: [[CHUNK:%.+]] = load i64, i64* %
// CHECK: call void {{.+}} @__kmpc_fork_call({{.+}}, i64 [[CHUNK]])

// CHECK: [[CHUNK_VAL:%.+]] = load i8, i8* %
// CHECK: [[CHUNK_SIZE:%.+]] = sext i8 [[CHUNK_VAL]] to i64
// CHECK: call void @__kmpc_for_static_init_8u([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID:%[^,]+]], i32 33, i32* [[IS_LAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]], i64 1, i64 [[CHUNK_SIZE]])
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
#pragma omp parallel for schedule(static, char(a))
  for (unsigned long long i = 1; i < 2; ++i) {
  }
}

// CHECK-LABEL: define {{.*void}} @{{.*}}without_schedule_clause{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void without_schedule_clause(float *a, float *b, float *c, float *d) {
#pragma omp parallel for
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
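// The loop below runs ceil((32000000 - 33) / 7) = 4571424 iterations, so GlobalUB is 4571423.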
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4571423
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 4571423, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// Loop header
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (int i = 33; i < 32000000; i += 7) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 7
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 33, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}static_not_chunked{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void static_not_chunked(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(static)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
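// Counting down from 32000000 by 7 while i > 33 also gives 4571424 iterations, so GlobalUB is again 4571423.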
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4571423
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 4571423, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// Loop header
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (int i = 32000000; i > 33; i += -7) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 7
// CHECK-NEXT: [[CALC_I_2:%.+]] = sub nsw i32 32000000, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}static_chunked{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void static_chunked(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(static, 5)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_for_static_init_4u([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID:%.+]], i32 33, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 5)
// UB = min(UB, GlobalUB)
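// The iteration space is unsigned here: (2147483647 - 131071) / 127 + 1 = 16908289 iterations,
// so GlobalUB is 16908288 and the comparisons below are unsigned.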
// CHECK: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp ugt i32 [[UB]], 16908288
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 16908288, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]

// Outer loop header
// CHECK: [[O_IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[O_UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ule i32 [[O_IV]], [[O_UB]]
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ule i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned i = 131071; i <= 2147483647; i += 127) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul i32 [[IV1_1]], 127
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 131071, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// Update the counters, adding stride
// CHECK: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: [[ST:%.+]] = load i32, i32* [[OMP_ST]]
// CHECK-NEXT: [[ADD_LB:%.+]] = add i32 [[LB]], [[ST]]
// CHECK-NEXT: store i32 [[ADD_LB]], i32* [[OMP_LB]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[ST:%.+]] = load i32, i32* [[OMP_ST]]
// CHECK-NEXT: [[ADD_UB:%.+]] = add i32 [[UB]], [[ST]]
// CHECK-NEXT: store i32 [[ADD_UB]], i32* [[OMP_UB]]

// CHECK: [[O_LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}dynamic1{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void dynamic1(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(dynamic)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_dispatch_init_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID:%.+]], i32 35, i64 0, i64 16908287, i64 1, i64 1)
//
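// The loop runs (2147483647 - 131071) / 127 = 16908288 iterations (exact division), so the
// dispatched iteration space is 0..16908287 with stride 1 and the default chunk of 1.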
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i64, i64* [[OMP_LB]]
// CHECK-NEXT: store i64 [[LB]], i64* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i64, i64* [[OMP_UB]]
// CHECK-NEXT: [[BOUND:%.+]] = add i64 [[UB]], 1
// CHECK-NEXT: [[CMP:%.+]] = icmp ult i64 [[IV]], [[BOUND]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned long long i = 131071; i < 2147483647; i += 127) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul i64 [[IV1_1]], 127
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i64 131071, [[CALC_I_1]]
// CHECK-NEXT: store i64 [[CALC_I_2]], i64* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add i64 [[IV1_2]], 1
// CHECK-NEXT: store i64 [[ADD1_2]], i64* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}guided7{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void guided7(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(guided, 7)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_dispatch_init_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID:%.+]], i32 36, i64 0, i64 16908287, i64 1, i64 7)
//
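// Same iteration space as dynamic1 (0..16908287), but dispatched with the guided schedule and a chunk of 7.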
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i64, i64* [[OMP_LB]]
// CHECK-NEXT: store i64 [[LB]], i64* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i64, i64* [[OMP_UB]]
// CHECK-NEXT: [[BOUND:%.+]] = add i64 [[UB]], 1
// CHECK-NEXT: [[CMP:%.+]] = icmp ult i64 [[IV]], [[BOUND]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned long long i = 131071; i < 2147483647; i += 127) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul i64 [[IV1_1]], 127
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i64 131071, [[CALC_I_1]]
// CHECK-NEXT: store i64 [[CALC_I_2]], i64* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add i64 [[IV1_2]], 1
// CHECK-NEXT: store i64 [[ADD1_2]], i64* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}test_auto{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void test_auto(float *a, float *b, float *c, float *d) {
  unsigned int x = 0;
  unsigned int y = 0;
#pragma omp parallel for schedule(auto) collapse(2)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_dispatch_init_8([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID:%.+]], i32 38, i64 0, i64 [[LAST_ITER:%[^,]+]], i64 1, i64 1)
//
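// Because the outer loop's start depends on y, the collapsed trip count is not a compile-time
// constant: the upper bound [[LAST_ITER]] is computed at run time and the signed 64-bit
// dispatch entry is used.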
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i64, i64* [[OMP_LB]]
// CHECK-NEXT: store i64 [[LB]], i64* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i64, i64* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i64 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
// FIXME: When the iteration count of some nested loop is not a known constant,
// we should pre-calculate it, like we do for the total number of iterations!
  for (char i = static_cast<char>(y); i <= '9'; ++i)
    for (x = 11; x > 0; --x) {
// CHECK: [[LOOP1_BODY]]
// Start of body: indices are calculated from IV:
// CHECK: store i8 {{%[^,]+}}, i8* {{%[^,]+}}
// CHECK: store i32 {{%[^,]+}}, i32* {{%[^,]+}}
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
      a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i64 [[IV1_2]], 1
// CHECK-NEXT: store i64 [[ADD1_2]], i64* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
    }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}runtime{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void runtime(float *a, float *b, float *c, float *d) {
  int x = 0;
#pragma omp parallel for collapse(2) schedule(runtime)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_dispatch_init_4([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID:%.+]], i32 37, i32 0, i32 199, i32 1, i32 1)
//
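// The collapsed iteration space is a compile-time constant here: 10 outer values ('0'..'9')
// times 20 inner values (x = -10..9) gives 200 iterations, so the dispatch bounds are 0..199.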
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_4([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned char i = '0'; i <= '9'; ++i)
    for (x = -10; x < 10; ++x) {
// CHECK: [[LOOP1_BODY]]
// Start of body: indices are calculated from IV:
// CHECK: store i8 {{%[^,]+}}, i8* {{%[^,]+}}
// CHECK: store i32 {{%[^,]+}}, i32* {{%[^,]+}}
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
      a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
    }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: ret void
}

// TERM_DEBUG-LABEL: foo
int foo() { return 0; }

// TERM_DEBUG-LABEL: parallel_for
// CLEANUP: parallel_for
void parallel_for(float *a, const int n) {
  float arr[n];
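// foo() may throw, and exceptions must not escape the parallel region, so its call below is
// emitted as an invoke whose unwind edge leads to a terminate landing pad
// (__clang_call_terminate). The TERM_DEBUG prefix pins debug locations on the worksharing
// init/fini calls; CLEANUP verifies that at -O1 no extra __kmpc_global_thread_num call appears.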
#pragma omp parallel for schedule(static, 5) private(arr) default(none) firstprivate(n) shared(a)
// TERM_DEBUG-NOT: __kmpc_global_thread_num
// TERM_DEBUG: call void @__kmpc_for_static_init_4u({{.+}}), !dbg [[DBG_LOC_START:![0-9]+]]
// TERM_DEBUG: invoke i32 {{.*}}foo{{.*}}()
// TERM_DEBUG: unwind label %[[TERM_LPAD:.+]],
// TERM_DEBUG-NOT: __kmpc_global_thread_num
// TERM_DEBUG: call void @__kmpc_for_static_fini({{.+}}), !dbg [[DBG_LOC_END:![0-9]+]]
// TERM_DEBUG: [[TERM_LPAD]]
// TERM_DEBUG: call void @__clang_call_terminate
// TERM_DEBUG: unreachable
// CLEANUP-NOT: __kmpc_global_thread_num
// CLEANUP: call void @__kmpc_for_static_init_4u({{.+}})
// CLEANUP: call void @__kmpc_for_static_fini({{.+}})
  for (unsigned i = 131071; i <= 2147483647; i += 127)
    a[i] += foo() + arr[i] + n;
}
// Check source line corresponds to "#pragma omp parallel for schedule(static, 5)" above:
// TERM_DEBUG-DAG: [[DBG_LOC_START]] = !DILocation(line: [[@LINE-4]],
// TERM_DEBUG-DAG: [[DBG_LOC_END]] = !DILocation(line: [[@LINE-18]],

// CHECK-LABEL: increment
int increment() {
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]])
#pragma omp for
// Determine UB = min(UB, GlobalUB)
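// The loop runs 5 iterations (i = 0..4), so GlobalUB is 4.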
// CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 4, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// CHECK-NEXT: br label %[[LOOP1_HEAD:.+]]

// Loop header
// CHECK: [[LOOP1_HEAD]]
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]

  for (int i = 0; i != 5; ++i)
// Start of body: calculate i from IV:
// CHECK: [[LOOP1_BODY]]
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 1
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 0, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %[[LOOP1_HEAD]]
  ;
// CHECK: [[LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK: __kmpc_barrier
  return 0;
// CHECK: ret i32 0
}

// CHECK-LABEL: decrement_nowait
int decrement_nowait() {
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]])
#pragma omp for nowait
// Determine UB = min(UB, GlobalUB)
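// As in increment(): 5 iterations (j = 5..1), so GlobalUB is 4. Because of the nowait clause
// no __kmpc_barrier call is expected after the static fini.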
// CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 4, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// CHECK-NEXT: br label %[[LOOP1_HEAD:.+]]

// Loop header
// CHECK: [[LOOP1_HEAD]]
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (int j = 5; j != 0; --j)
// Start of body: calculate j from IV:
// CHECK: [[LOOP1_BODY]]
// CHECK: [[IV2_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_II_1:%.+]] = mul nsw i32 [[IV2_1]], 1
// CHECK-NEXT: [[CALC_II_2:%.+]] = sub nsw i32 5, [[CALC_II_1]]
// CHECK-NEXT: store i32 [[CALC_II_2]], i32* [[LC_I:.+]]
// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %[[LOOP1_HEAD]]
  ;
// CHECK: [[LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK-NOT: __kmpc_barrier
  return 0;
// CHECK: ret i32 0
}
#endif // HEADER