1 | // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm -fexceptions -fcxx-exceptions -o - < %s | FileCheck %s |
2 | // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t < %s |
3 | // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify -emit-llvm -o - < %s | FileCheck %s |
4 | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm -o - < %s | FileCheck %s --check-prefix=TERM_DEBUG |
5 | |
6 | // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm -fexceptions -fcxx-exceptions -o - < %s | FileCheck --check-prefix SIMD-ONLY0 %s |
7 | // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t < %s |
8 | // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify -emit-llvm -o - < %s | FileCheck --check-prefix SIMD-ONLY0 %s |
9 | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm -o - < %s | FileCheck --check-prefix SIMD-ONLY0 %s |
10 | // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} |
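// The SIMD-ONLY0 runs use -fopenmp-simd, where only the simd semantics are honored,
// so the output must contain no OpenMP runtime (__kmpc_*) or offloading (__tgt_*) calls.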
11 | // expected-no-diagnostics |
12 | #ifndef HEADER |
13 | #define HEADER |
14 | |
15 | long long get_val() { return 0; } |
16 | double *g_ptr; |
17 | |
18 | // CHECK-LABEL: define {{.*void}} @{{.*}}simple{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}}) |
19 | void simple(float *a, float *b, float *c, float *d) { |
20 | #pragma omp for simd |
21 | // CHECK: call void @__kmpc_for_static_init_4(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1) |
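// Schedule value 34 is the runtime's unchunked static schedule (kmp_sch_static in the
// libomp sched enum); the runtime fills in the per-thread lb/ub/stride through the
// pointer arguments.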
22 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]], |
23 | // CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 5 |
24 | // CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]] |
25 | // CHECK: [[TRUE]]: |
26 | // CHECK: br label %[[SWITCH:[^,]+]] |
27 | // CHECK: [[FALSE]]: |
28 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]], |
29 | // CHECK: br label %[[SWITCH]] |
30 | // CHECK: [[SWITCH]]: |
31 | // CHECK: [[UP:%.+]] = phi i32 [ 5, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ] |
32 | // CHECK: store i32 [[UP]], i32* [[UB]], |
33 | // CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]], |
34 | // CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]], |
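// For i in [3, 32) with step 5 there are 6 iterations, so the last logical iteration
// is 5; the thread's UB is clamped to that value and the IV starts at the thread's LB.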
35 | |
36 | // CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]] |
37 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]] |
38 | // CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]] |
39 | // CHECK-NEXT: br i1 [[CMP]], label %[[SIMPLE_LOOP1_BODY:.+]], label %[[SIMPLE_LOOP1_END:[^,]+]] |
40 | for (int i = 3; i < 32; i += 5) { |
41 | // CHECK: [[SIMPLE_LOOP1_BODY]]: |
42 | // Start of body: calculate i from IV: |
43 | // CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]{{.*}} |
44 | // CHECK: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 5 |
45 | // CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 3, [[CALC_I_1]] |
46 | // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]] |
47 | // ... loop body ... |
48 | // End of body: store into a[i]: |
49 | // CHECK: store float [[RESULT:%.+]], float* |
50 | a[i] = b[i] * c[i] * d[i]; |
51 | // CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]] |
52 | // CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1 |
53 | // CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]] |
54 | // br label %{{.+}}, !llvm.loop !{{.+}} |
55 | } |
56 | // CHECK: [[SIMPLE_LOOP1_END]]: |
57 | // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
58 | // CHECK: call void @__kmpc_barrier(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
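// Implicit barrier at the end of the worksharing construct (no nowait clause).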
59 | |
60 | long long k = get_val(); |
61 | |
62 | #pragma omp for simd linear(k : 3) schedule(simd, nonmonotonic: dynamic) |
63 | // CHECK: [[K0:%.+]] = call {{.*}}i64 @{{.*}}get_val |
64 | // CHECK-NEXT: store i64 [[K0]], i64* [[K_VAR:%[^,]+]] |
65 | // CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_VAR]] |
66 | // CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]] |
67 | |
68 | // CHECK: call void @__kmpc_dispatch_init_4(%struct.ident_t* {{.+}}, i32 %{{.+}}, i32 1073741859, i32 0, i32 8, i32 1, i32 1) |
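// 1073741859 == 0x40000023, i.e. kmp_sch_dynamic_chunked (35) with the nonmonotonic
// modifier bit (0x40000000) set; 0 and 8 are the bounds of the 9-iteration space.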
69 | // CHECK: [[NEXT:%.+]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* {{.+}}, i32 %{{.+}}, i32* %{{.+}}, i32* [[LB:%.+]], i32* [[UB:%.+]], i32* %{{.+}}) |
70 | // CHECK: [[COND:%.+]] = icmp ne i32 [[NEXT]], 0 |
71 | // CHECK: br i1 [[COND]], label %[[CONT:.+]], label %[[END:.+]] |
72 | // CHECK: [[CONT]]: |
73 | // CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]], |
74 | // CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV2:%[^,]+]], |
75 | |
76 | // CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group |
77 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.access.group |
78 | // CHECK-NEXT: [[CMP2:%.+]] = icmp sle i32 [[IV2]], [[UB_VAL]] |
79 | // CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]] |
80 | for (int i = 10; i > 1; i--) { |
81 | // CHECK: [[SIMPLE_LOOP2_BODY]]: |
82 | // Start of body: calculate i from IV: |
83 | // CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group |
84 | // FIXME: Why is the following "mul 1" not constant folded? |
85 | // CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1 |
86 | // CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]] |
87 | // CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.access.group |
88 | // |
89 | // CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.access.group |
90 | // CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group |
91 | // CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3 |
92 | // CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64 |
93 | // CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]] |
94 | // Update of the privatized version of the linear variable. |
95 | // CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]] |
96 | a[k]++; |
97 | k = k + 3; |
98 | // CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group |
99 | // CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1 |
100 | // CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.access.group |
101 | // br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]] |
102 | } |
103 | // CHECK: [[SIMPLE_LOOP2_END]]: |
104 | // |
105 | // Update the linear variable after the loop, as the loop operated on a private copy. |
106 | // CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]] |
107 | // CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27 |
108 | // CHECK-NEXT: store i64 [[LIN_ADD2]], i64* [[K_VAR]] |
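// 27 == 9 iterations * linear step 3, applied to the captured start value of k.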
109 | // CHECK: call void @__kmpc_barrier(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
110 | |
111 | int lin = 12; |
112 | #pragma omp for simd linear(lin : get_val()), linear(g_ptr) |
113 | |
114 | // Initialize the private copy of the linear variable. |
115 | // CHECK: store i32 12, i32* [[LIN_VAR:%[^,]+]] |
116 | // CHECK: [[LIN_LOAD:%.+]] = load i32, i32* [[LIN_VAR]] |
117 | // CHECK-NEXT: store i32 [[LIN_LOAD]], i32* [[LIN_START:%[^,]+]] |
118 | // Remember the linear step (the result of the get_val() call). |
119 | // CHECK: [[CALL_VAL:%.+]] = invoke |
120 | // CHECK: store i64 [[CALL_VAL]], i64* [[LIN_STEP:%[^,]+]] |
121 | |
122 | // CHECK: [[GLIN_LOAD:%.+]] = load double*, double** [[GLIN_VAR:@[^,]+]] |
123 | // CHECK-NEXT: store double* [[GLIN_LOAD]], double** [[GLIN_START:%[^,]+]] |
124 | |
125 | // CHECK: call void @__kmpc_for_static_init_8u(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1) |
126 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]], |
127 | // CHECK: [[CMP:%.+]] = icmp ugt i64 [[UB_VAL]], 3 |
128 | // CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]] |
129 | // CHECK: [[TRUE]]: |
130 | // CHECK: br label %[[SWITCH:[^,]+]] |
131 | // CHECK: [[FALSE]]: |
132 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]], |
133 | // CHECK: br label %[[SWITCH]] |
134 | // CHECK: [[SWITCH]]: |
135 | // CHECK: [[UP:%.+]] = phi i64 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ] |
136 | // CHECK: store i64 [[UP]], i64* [[UB]], |
137 | // CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]], |
138 | // CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV3:%[^,]+]], |
139 | |
140 | // CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]] |
141 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]] |
142 | // CHECK-NEXT: [[CMP3:%.+]] = icmp ule i64 [[IV3]], [[UB_VAL]] |
143 | // CHECK-NEXT: br i1 [[CMP3]], label %[[SIMPLE_LOOP3_BODY:.+]], label %[[SIMPLE_LOOP3_END:[^,]+]] |
144 | for (unsigned long long it = 2000; it >= 600; it-=400) { |
145 | // CHECK: [[SIMPLE_LOOP3_BODY]]: |
146 | // Start of body: calculate it from IV: |
147 | // CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]] |
148 | // CHECK-NEXT: [[LC_IT_1:%.+]] = mul i64 [[IV3_0]], 400 |
149 | // CHECK-NEXT: [[LC_IT_2:%.+]] = sub i64 2000, [[LC_IT_1]] |
150 | // CHECK-NEXT: store i64 [[LC_IT_2]], i64* |
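// For it from 2000 down to 600 with step 400 there are 4 iterations (hence the clamp
// of UB to 3 above), and it is recomputed in the body as 2000 - IV * 400.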
151 | // |
152 | // The linear start and step are used to calculate the current value of the linear variable. |
153 | // CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]] |
154 | // CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]] |
155 | // CHECK-NOT: store i32 {{.+}}, i32* [[LIN_VAR]], |
156 | // CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]] |
157 | // CHECK-NEXT: [[IV3_1:%.+]] = load i64, i64* [[OMP_IV3]] |
158 | // CHECK-NEXT: [[MUL:%.+]] = mul i64 [[IV3_1]], 1 |
159 | // CHECK: [[GEP:%.+]] = getelementptr{{.*}}[[GLINSTART]] |
160 | // CHECK-NEXT: store double* [[GEP]], double** [[G_PTR_CUR:%[^,]+]] |
161 | *g_ptr++ = 0.0; |
162 | // CHECK: [[GEP_VAL:%.+]] = load double{{.*}}[[G_PTR_CUR]] |
163 | // CHECK: store double{{.*}}[[GEP_VAL]] |
164 | a[it + lin]++; |
165 | // CHECK: [[FLT_INC:%.+]] = fadd float |
166 | // CHECK-NEXT: store float [[FLT_INC]], |
167 | // CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]] |
168 | // CHECK-NEXT: [[ADD3_2:%.+]] = add i64 [[IV3_2]], 1 |
169 | // CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]] |
170 | } |
171 | // CHECK: [[SIMPLE_LOOP3_END]]: |
172 | // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
173 | // |
174 | // The linear start and step are used to calculate the final value of the linear variables. |
175 | // CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]] |
176 | // CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]] |
177 | // CHECK: store i32 {{.+}}, i32* [[LIN_VAR]], |
178 | // CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]] |
179 | // CHECK: store double* {{.*}}[[GLIN_VAR]] |
180 | // CHECK: call void @__kmpc_barrier(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
181 | |
182 | #pragma omp for simd |
183 | // CHECK: call void @__kmpc_for_static_init_4(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1) |
184 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]], |
185 | // CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 3 |
186 | // CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]] |
187 | // CHECK: [[TRUE]]: |
188 | // CHECK: br label %[[SWITCH:[^,]+]] |
189 | // CHECK: [[FALSE]]: |
190 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]], |
191 | // CHECK: br label %[[SWITCH]] |
192 | // CHECK: [[SWITCH]]: |
193 | // CHECK: [[UP:%.+]] = phi i32 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ] |
194 | // CHECK: store i32 [[UP]], i32* [[UB]], |
195 | // CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]], |
196 | // CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV4:%[^,]+]], |
197 | |
198 | // CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]] |
199 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]] |
200 | // CHECK-NEXT: [[CMP4:%.+]] = icmp sle i32 [[IV4]], [[UB_VAL]] |
201 | // CHECK-NEXT: br i1 [[CMP4]], label %[[SIMPLE_LOOP4_BODY:.+]], label %[[SIMPLE_LOOP4_END:[^,]+]] |
202 | for (short it = 6; it <= 20; it-=-4) { |
203 | // CHECK: [[SIMPLE_LOOP4_BODY]]: |
204 | // Start of body: calculate it from IV: |
205 | // CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]] |
206 | // CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV4_0]], 4 |
207 | // CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 6, [[LC_IT_1]] |
208 | // CHECK-NEXT: [[LC_IT_3:%.+]] = trunc i32 [[LC_IT_2]] to i16 |
209 | // CHECK-NEXT: store i16 [[LC_IT_3]], i16* |
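// it is recomputed as 6 + IV * 4 and truncated back to short; the loop has
// (20 - 6) / 4 + 1 == 4 iterations, matching the clamp of UB to 3 above.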
210 | |
211 | // CHECK: [[IV4_2:%.+]] = load i32, i32* [[OMP_IV4]] |
212 | // CHECK-NEXT: [[ADD4_2:%.+]] = add nsw i32 [[IV4_2]], 1 |
213 | // CHECK-NEXT: store i32 [[ADD4_2]], i32* [[OMP_IV4]] |
214 | } |
215 | // CHECK: [[SIMPLE_LOOP4_END]]: |
216 | // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
217 | // CHECK: call void @__kmpc_barrier(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
218 | |
219 | #pragma omp for simd |
220 | // CHECK: call void @__kmpc_for_static_init_4(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1) |
221 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]], |
222 | // CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 25 |
223 | // CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]] |
224 | // CHECK: [[TRUE]]: |
225 | // CHECK: br label %[[SWITCH:[^,]+]] |
226 | // CHECK: [[FALSE]]: |
227 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]], |
228 | // CHECK: br label %[[SWITCH]] |
229 | // CHECK: [[SWITCH]]: |
230 | // CHECK: [[UP:%.+]] = phi i32 [ 25, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ] |
231 | // CHECK: store i32 [[UP]], i32* [[UB]], |
232 | // CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]], |
233 | // CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV5:%[^,]+]], |
234 | |
235 | // CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]] |
236 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]] |
237 | // CHECK-NEXT: [[CMP5:%.+]] = icmp sle i32 [[IV5]], [[UB_VAL]] |
238 | // CHECK-NEXT: br i1 [[CMP5]], label %[[SIMPLE_LOOP5_BODY:.+]], label %[[SIMPLE_LOOP5_END:[^,]+]] |
239 | for (unsigned char it = 'z'; it >= 'a'; it+=-1) { |
240 | // CHECK: [[SIMPLE_LOOP5_BODY]]: |
241 | // Start of body: calculate it from IV: |
242 | // CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]] |
243 | // CHECK-NEXT: [[IV5_1:%.+]] = mul nsw i32 [[IV5_0]], 1 |
244 | // CHECK-NEXT: [[LC_IT_1:%.+]] = sub nsw i32 122, [[IV5_1]] |
245 | // CHECK-NEXT: [[LC_IT_2:%.+]] = trunc i32 [[LC_IT_1]] to i8 |
246 | // CHECK-NEXT: store i8 [[LC_IT_2]], i8* |
247 | |
248 | // CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]] |
249 | // CHECK-NEXT: [[ADD5_2:%.+]] = add nsw i32 [[IV5_2]], 1 |
250 | // CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]] |
251 | } |
252 | // CHECK: [[SIMPLE_LOOP5_END]]: |
253 | // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
254 | // CHECK: call void @__kmpc_barrier(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
255 | |
256 | // CHECK-NOT: mul i32 %{{.+}}, 10 |
257 | #pragma omp for simd |
258 | for (unsigned i=100; i<10; i+=10) { |
259 | } |
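// The loop above has zero iterations (100 < 10 is already false), so the CHECK-NOT
// above verifies that no "mul ..., 10" counter update is emitted for it.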
260 | |
261 | int A; |
262 | #pragma omp parallel |
263 | { |
264 | // CHECK: store i32 -1, i32* [[A:%.+]], |
265 | A = -1; |
266 | #pragma omp for simd lastprivate(A) |
267 | // CHECK: call void @__kmpc_for_static_init_8(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1) |
268 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]], |
269 | // CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6 |
270 | // CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]] |
271 | // CHECK: [[TRUE]]: |
272 | // CHECK: br label %[[SWITCH:[^,]+]] |
273 | // CHECK: [[FALSE]]: |
274 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]], |
275 | // CHECK: br label %[[SWITCH]] |
276 | // CHECK: [[SWITCH]]: |
277 | // CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ] |
278 | // CHECK: store i64 [[UP]], i64* [[UB]], |
279 | // CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]], |
280 | // CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV7:%[^,]+]], |
281 | |
282 | // CHECK: br label %[[SIMD_LOOP7_COND:[^,]+]] |
283 | // CHECK: [[SIMD_LOOP7_COND]]: |
284 | // CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]] |
285 | // CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]] |
286 | // CHECK-NEXT: [[CMP7:%.+]] = icmp sle i64 [[IV7]], [[UB_VAL]] |
287 | // CHECK-NEXT: br i1 [[CMP7]], label %[[SIMPLE_LOOP7_BODY:.+]], label %[[SIMPLE_LOOP7_END:[^,]+]] |
288 | for (long long i = -10; i < 10; i += 3) { |
289 | // CHECK: [[SIMPLE_LOOP7_BODY]]: |
290 | // Start of body: calculate i from IV: |
291 | // CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]] |
292 | // CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV7_0]], 3 |
293 | // CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]] |
294 | // CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]], |
295 | // CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]] |
296 | // CHECK-NEXT: [[CONV:%.+]] = trunc i64 [[LC_VAL]] to i32 |
297 | // CHECK-NEXT: store i32 [[CONV]], i32* [[A_PRIV:%[^,]+]], |
298 | A = i; |
299 | // CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]] |
300 | // CHECK-NEXT: [[ADD7_2:%.+]] = add nsw i64 [[IV7_2]], 1 |
301 | // CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]] |
302 | } |
303 | // CHECK: [[SIMPLE_LOOP7_END]]: |
304 | // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
305 | // CHECK: load i32, i32* |
306 | // CHECK: icmp ne i32 %{{.+}}, 0 |
307 | // CHECK: br i1 %{{.+}}, label |
308 | // CHECK: [[A_PRIV_VAL:%.+]] = load i32, i32* [[A_PRIV]], |
309 | // CHECK-NEXT: store i32 [[A_PRIV_VAL]], i32* %{{.+}}, |
310 | // CHECK-NEXT: br label |
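// Lastprivate copy-out: only a thread whose chunk contained the last iteration
// (is-last flag != 0) stores its private A back into the original variable.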
311 | // CHECK: call void @__kmpc_barrier(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
312 | } |
313 | int R; |
314 | #pragma omp parallel |
315 | { |
316 | // CHECK: store i32 -1, i32* [[R:%[^,]+]], |
317 | R = -1; |
318 | // CHECK: store i32 1, i32* [[R_PRIV:%[^,]+]], |
319 | #pragma omp for simd reduction(*:R) |
320 | // CHECK: call void @__kmpc_for_static_init_8(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1) |
321 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]], |
322 | // CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6 |
323 | // CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]] |
324 | // CHECK: [[TRUE]]: |
325 | // CHECK: br label %[[SWITCH:[^,]+]] |
326 | // CHECK: [[FALSE]]: |
327 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]], |
328 | // CHECK: br label %[[SWITCH]] |
329 | // CHECK: [[SWITCH]]: |
330 | // CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ] |
331 | // CHECK: store i64 [[UP]], i64* [[UB]], |
332 | // CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]], |
333 | // CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV8:%[^,]+]], |
334 | |
335 | // CHECK: br label %[[SIMD_LOOP8_COND:[^,]+]] |
336 | // CHECK: [[SIMD_LOOP8_COND]]: |
337 | // CHECK-NEXT: [[IV8:%.+]] = load i64, i64* [[OMP_IV8]] |
338 | // CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]] |
339 | // CHECK-NEXT: [[CMP8:%.+]] = icmp sle i64 [[IV8]], [[UB_VAL]] |
340 | // CHECK-NEXT: br i1 [[CMP8]], label %[[SIMPLE_LOOP8_BODY:.+]], label %[[SIMPLE_LOOP8_END:[^,]+]] |
341 | for (long long i = -10; i < 10; i += 3) { |
342 | // CHECK: [[SIMPLE_LOOP8_BODY]]: |
343 | // Start of body: calculate i from IV: |
344 | // CHECK: [[IV8_0:%.+]] = load i64, i64* [[OMP_IV8]] |
345 | // CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV8_0]], 3 |
346 | // CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]] |
347 | // CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]], |
348 | // CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]] |
349 | // CHECK: store i32 %{{.+}}, i32* [[R_PRIV]], |
350 | R *= i; |
351 | // CHECK: [[IV8_2:%.+]] = load i64, i64* [[OMP_IV8]] |
352 | // CHECK-NEXT: [[ADD8_2:%.+]] = add nsw i64 [[IV8_2]], 1 |
353 | // CHECK-NEXT: store i64 [[ADD8_2]], i64* [[OMP_IV8]] |
354 | } |
355 | // CHECK: [[SIMPLE_LOOP8_END]]: |
356 | // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
357 | // CHECK: call i32 @__kmpc_reduce( |
358 | // CHECK: [[R_PRIV_VAL:%.+]] = load i32, i32* [[R_PRIV]], |
359 | // CHECK: [[RED:%.+]] = mul nsw i32 %{{.+}}, [[R_PRIV_VAL]] |
360 | // CHECK-NEXT: store i32 [[RED]], i32* %{{.+}}, |
361 | // CHECK-NEXT: call void @__kmpc_end_reduce( |
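// When __kmpc_reduce returns 1 (the non-atomic path), the private partial product
// R_PRIV is multiplied into the original R and __kmpc_end_reduce is called.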
362 | // CHECK: call void @__kmpc_barrier(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
363 | } |
364 | } |
365 | |
366 | template <class T, unsigned K> T tfoo(T a) { return a + K; } |
367 | |
368 | template <typename T, unsigned N> |
369 | int templ1(T a, T *z) { |
370 | #pragma omp for simd collapse(N) schedule(simd: static, N) |
371 | for (int i = 0; i < N * 2; i++) { |
372 | for (long long j = 0; j < (N + N + N + N); j += 2) { |
373 | z[i + j] = a + tfoo<T, N>(i + j); |
374 | } |
375 | } |
376 | return 0; |
377 | } |
378 | |
379 | // Instantiation of templ1<float, 2> |
380 | // CHECK-LABEL: define {{.*i32}} @{{.*}}templ1{{.*}}(float {{.+}}, float* {{.+}}) |
381 | // CHECK: call void @__kmpc_for_static_init_8(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 45, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 2) |
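// Schedule value 45 with chunk 2 is the chunked static schedule generated for
// schedule(simd: static, N) — in libomp this appears to be kmp_sch_static_balanced_chunked.
// The collapsed space has 4 * 4 == 16 iterations, hence the clamp of UB to 15 below.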
382 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]], |
383 | // CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 15 |
384 | // CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]] |
385 | // CHECK: [[TRUE]]: |
386 | // CHECK: br label %[[SWITCH:[^,]+]] |
387 | // CHECK: [[FALSE]]: |
388 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]], |
389 | // CHECK: br label %[[SWITCH]] |
390 | // CHECK: [[SWITCH]]: |
391 | // CHECK: [[UP:%.+]] = phi i64 [ 15, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ] |
392 | // CHECK: store i64 [[UP]], i64* [[UB]], |
393 | // CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]], |
394 | // CHECK: store i64 [[LB_VAL]], i64* [[T1_OMP_IV:%[^,]+]], |
395 | |
396 | // ... |
397 | // CHECK: icmp sle i64 |
398 | // CHECK: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]] |
399 | // CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]] |
400 | // CHECK-NEXT: [[CMP1:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]] |
401 | // CHECK-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]] |
402 | // CHECK: [[T1_BODY]]: |
403 | // Computation of the loop counters i and j from the collapsed IV: |
404 | // CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]] |
405 | // CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4 |
406 | // CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1 |
407 | // CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]] |
408 | // CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32 |
409 | // CHECK-NEXT: store i32 [[I_2]], i32* |
410 | // CHECK: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]] |
411 | // CHECK-NEXT: [[IV2_1:%.+]] = load i64, i64* [[T1_OMP_IV]] |
412 | // CHECK-NEXT: [[DIV_2:%.+]] = sdiv i64 [[IV2_1]], 4 |
413 | // CHECK-NEXT: [[MUL_2:%.+]] = mul nsw i64 [[DIV_2]], 4 |
414 | // CHECK-NEXT: [[J_1:%.+]] = sub nsw i64 [[IV2]], [[MUL_2]] |
415 | // CHECK-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2 |
416 | // CHECK-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]] |
417 | // CHECK-NEXT: store i64 [[J_2_ADD0]], i64* |
418 | // simd.for.inc: |
419 | // CHECK: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]] |
420 | // CHECK-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1 |
421 | // CHECK-NEXT: store i64 [[INC]], i64* [[T1_OMP_IV]] |
422 | // CHECK-NEXT: br label {{%.+}} |
423 | // CHECK: [[T1_END]]: |
424 | // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
425 | // CHECK: call void @__kmpc_barrier(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
426 | // CHECK: ret i32 0 |
427 | // |
428 | void inst_templ1() { |
429 | float a; |
430 | float z[100]; |
431 | templ1<float,2> (a, z); |
432 | } |
433 | |
434 | |
435 | typedef int MyIdx; |
436 | |
437 | class IterDouble { |
438 | double *Ptr; |
439 | public: |
440 | IterDouble operator++ () const { |
441 | IterDouble n; |
442 | n.Ptr = Ptr + 1; |
443 | return n; |
444 | } |
445 | bool operator < (const IterDouble &that) const { |
446 | return Ptr < that.Ptr; |
447 | } |
448 | double & operator *() const { |
449 | return *Ptr; |
450 | } |
451 | MyIdx operator - (const IterDouble &that) const { |
452 | return (MyIdx) (Ptr - that.Ptr); |
453 | } |
454 | IterDouble operator + (int Delta) { |
455 | IterDouble re; |
456 | re.Ptr = Ptr + Delta; |
457 | return re; |
458 | } |
459 | |
460 | ///~IterDouble() {} |
461 | }; |
462 | |
463 | // CHECK-LABEL: define {{.*void}} @{{.*}}iter_simple{{.*}} |
464 | void iter_simple(IterDouble ia, IterDouble ib, IterDouble ic) { |
465 | // |
466 | // Calculate the number of iterations before the loop body. |
467 | // CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}} |
468 | // CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1 |
469 | // CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1 |
470 | // CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1 |
471 | // CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1 |
472 | // CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}} |
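// The trip count of the iterator-based loop is computed up front via
// IterDouble::operator-: OMP_LAST_IT = ((ib - ia) - 1 + 1) / 1 - 1.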
473 | #pragma omp for simd |
474 | |
475 | // CHECK: call void @__kmpc_for_static_init_4(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1) |
476 | // CHECK-DAG: [[UB_VAL:%.+]] = load i32, i32* [[UB]], |
477 | // CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]], |
478 | // CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], [[OMP_LAST_IT_VAL]] |
479 | // CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]] |
480 | // CHECK: [[TRUE]]: |
481 | // CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]], |
482 | // CHECK: br label %[[SWITCH:[^,]+]] |
483 | // CHECK: [[FALSE]]: |
484 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]], |
485 | // CHECK: br label %[[SWITCH]] |
486 | // CHECK: [[SWITCH]]: |
487 | // CHECK: [[UP:%.+]] = phi i32 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ] |
488 | // CHECK: store i32 [[UP]], i32* [[UB]], |
489 | // CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]], |
490 | // CHECK: store i32 [[LB_VAL]], i32* [[IT_OMP_IV:%[^,]+]], |
491 | |
492 | // CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]] |
493 | // CHECK-NEXT: [[UB_VAL:%.+]] = load i32, i32* [[UB]] |
494 | // CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]] |
495 | // CHECK-NEXT: br i1 [[CMP]], label %[[IT_BODY:[^,]+]], label %[[IT_END:[^,]+]] |
496 | for (IterDouble i = ia; i < ib; ++i) { |
497 | // CHECK: [[IT_BODY]]: |
498 | // Start of body: calculate i from index: |
499 | // CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]] |
500 | // Call of operator+ on the start iterator and IV to compute i. |
501 | // CHECK: {{%.+}} = invoke {{.+}} @{{.*}}IterDouble{{.*}} |
502 | // ... loop body ... |
503 | *i = *ic * 0.5; |
504 | // Floating-point multiply and store of the result. |
505 | // CHECK: [[MULR:%.+]] = fmul double {{%.+}}, 5.000000e-01 |
506 | // CHECK-NEXT: invoke {{.+}} @{{.*}}IterDouble{{.*}} |
507 | // CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]] |
508 | ++ic; |
509 | // |
510 | // CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]] |
511 | // CHECK-NEXT: [[ADD2:%.+]] = add nsw i32 [[IV2]], 1 |
512 | // CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]] |
513 | // br label %{{.*}}, !llvm.loop ![[ITER_LOOP_ID]] |
514 | } |
515 | // CHECK: [[IT_END]]: |
516 | // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
517 | // CHECK: call void @__kmpc_barrier(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
518 | // CHECK: ret void |
519 | } |
520 | |
521 | |
522 | // CHECK-LABEL: define {{.*void}} @{{.*}}collapsed{{.*}} |
523 | void collapsed(float *a, float *b, float *c, float *d) { |
524 | int i; // outer loop counter |
525 | unsigned j; // middle loop counter, leads to unsigned icmp in loop header. |
526 | // k declared in the loop init below |
527 | short l; // inner loop counter |
528 | // CHECK: call void @__kmpc_for_static_init_4u(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1) |
529 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]], |
530 | // CHECK: [[CMP:%.+]] = icmp ugt i32 [[UB_VAL]], 119 |
531 | // CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]] |
532 | // CHECK: [[TRUE]]: |
533 | // CHECK: br label %[[SWITCH:[^,]+]] |
534 | // CHECK: [[FALSE]]: |
535 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]], |
536 | // CHECK: br label %[[SWITCH]] |
537 | // CHECK: [[SWITCH]]: |
538 | // CHECK: [[UP:%.+]] = phi i32 [ 119, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ] |
539 | // CHECK: store i32 [[UP]], i32* [[UB]], |
540 | // CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]], |
541 | // CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]], |
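// The four collapsed loops yield 2 * 3 * 4 * 5 == 120 iterations (hence the unsigned
// clamp of UB to 119 above); i, j, k and l are reconstructed from the flat IV below.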
542 | // |
543 | #pragma omp for simd collapse(4) |
544 | |
545 | // CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]] |
546 | // CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]] |
547 | // CHECK-NEXT: [[CMP:%.+]] = icmp ule i32 [[IV]], [[UB_VAL]] |
548 | // CHECK-NEXT: br i1 [[CMP]], label %[[COLL1_BODY:[^,]+]], label %[[COLL1_END:[^,]+]] |
549 | for (i = 1; i < 3; i++) // 2 iterations |
550 | for (j = 2u; j < 5u; j++) //3 iterations |
551 | for (int k = 3; k <= 6; k++) // 4 iterations |
552 | for (l = 4; l < 9; ++l) // 5 iterations |
553 | { |
554 | // CHECK: [[COLL1_BODY]]: |
555 | // Start of body: calculate i from index: |
556 | // CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]] |
557 | // Calculation of the loop counter values. |
558 | // CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60 |
559 | // CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1 |
560 | // CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]] |
561 | // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]] |
562 | |
563 | // CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]] |
564 | // CHECK: [[IV1_2_1:%.+]] = load i32, i32* [[OMP_IV]] |
565 | // CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2_1]], 60 |
566 | // CHECK-NEXT: [[MUL_1:%.+]] = mul i32 [[CALC_J_1]], 60 |
567 | // CHECK-NEXT: [[SUB_3:%.+]] = sub i32 [[IV1_2]], [[MUL_1]] |
568 | // CHECK-NEXT: [[CALC_J_2:%.+]] = udiv i32 [[SUB_3]], 20 |
569 | // CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1 |
570 | // CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]] |
571 | // CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]] |
572 | |
573 | // CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]] |
574 | // CHECK: [[IV1_3_1:%.+]] = load i32, i32* [[OMP_IV]] |
575 | // CHECK-NEXT: [[DIV_1:%.+]] = udiv i32 [[IV1_3_1]], 60 |
576 | // CHECK-NEXT: [[MUL_2:%.+]] = mul i32 [[DIV_1]], 60 |
577 | // CHECK-NEXT: [[ADD_3:%.+]] = sub i32 [[IV1_3]], [[MUL_2]] |
578 | |
579 | // CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]] |
580 | // CHECK: [[IV1_4_1:%.+]] = load i32, i32* [[OMP_IV]] |
581 | // CHECK-NEXT: [[DIV_2:%.+]] = udiv i32 [[IV1_4_1]], 60 |
582 | // CHECK-NEXT: [[MUL_3:%.+]] = mul i32 [[DIV_2]], 60 |
583 | // CHECK-NEXT: [[SUB_6:%.+]] = sub i32 [[IV1_4]], [[MUL_3]] |
584 | // CHECK-NEXT: [[DIV_3:%.+]] = udiv i32 [[SUB_6]], 20 |
585 | // CHECK-NEXT: [[MUL_4:%.+]] = mul i32 [[DIV_3]], 20 |
586 | // CHECK-NEXT: [[SUB_7:%.+]] = sub i32 [[ADD_3]], [[MUL_4]] |
587 | // CHECK-NEXT: [[DIV_4:%.+]] = udiv i32 [[SUB_7]], 5 |
588 | // CHECK-NEXT: [[MUL_5:%.+]] = mul i32 [[DIV_4]], 1 |
589 | // CHECK-NEXT: [[ADD_6:%.+]] = add i32 3, [[MUL_5]] |
590 | // CHECK-NEXT: store i32 [[ADD_6]], i32* [[LC_K:.+]] |
591 | |
592 | // CHECK: [[IV1_5:%.+]] = load i32, i32* [[OMP_IV]] |
593 | // CHECK: [[IV1_5_1:%.+]] = load i32, i32* [[OMP_IV]] |
594 | // CHECK-NEXT: [[DIV_5:%.+]] = udiv i32 [[IV1_5_1]], 60 |
595 | // CHECK-NEXT: [[MUL_6:%.+]] = mul i32 [[DIV_5]], 60 |
596 | // CHECK-NEXT: [[ADD_7:%.+]] = sub i32 [[IV1_5]], [[MUL_6]] |
597 | |
598 | // CHECK: [[IV1_6:%.+]] = load i32, i32* [[OMP_IV]] |
599 | // CHECK: [[IV1_6_1:%.+]] = load i32, i32* [[OMP_IV]] |
600 | // CHECK-NEXT: [[DIV_6:%.+]] = udiv i32 [[IV1_6_1]], 60 |
601 | // CHECK-NEXT: [[MUL_7:%.+]] = mul i32 [[DIV_6]], 60 |
602 | // CHECK-NEXT: [[SUB_10:%.+]] = sub i32 [[IV1_6]], [[MUL_7]] |
603 | // CHECK-NEXT: [[DIV_7:%.+]] = udiv i32 [[SUB_10]], 20 |
604 | // CHECK-NEXT: [[MUL_8:%.+]] = mul i32 [[DIV_7]], 20 |
605 | // CHECK-NEXT: [[ADD_9:%.+]] = sub i32 [[ADD_7]], [[MUL_8]] |
606 | |
607 | // CHECK: [[IV1_7:%.+]] = load i32, i32* [[OMP_IV]] |
608 | // CHECK: [[IV1_7_1:%.+]] = load i32, i32* [[OMP_IV]] |
609 | // CHECK-NEXT: [[DIV_8:%.+]] = udiv i32 [[IV1_7_1]], 60 |
610 | // CHECK-NEXT: [[MUL_9:%.+]] = mul i32 [[DIV_8]], 60 |
611 | // CHECK-NEXT: [[ADD_10:%.+]] = sub i32 [[IV1_7]], [[MUL_9]] |
612 | |
613 | // CHECK: [[IV1_8:%.+]] = load i32, i32* [[OMP_IV]] |
614 | // CHECK: [[IV1_8_1:%.+]] = load i32, i32* [[OMP_IV]] |
615 | // CHECK-NEXT: [[DIV_3:%.+]] = udiv i32 [[IV1_8_1]], 60 |
616 | // CHECK-NEXT: [[MUL_4:%.+]] = mul i32 [[DIV_3]], 60 |
617 | // CHECK-NEXT: [[SUB_7:%.+]] = sub i32 [[IV1_8]], [[MUL_4]] |
618 | // CHECK-NEXT: [[DIV_4:%.+]] = udiv i32 [[SUB_7]], 20 |
619 | // CHECK-NEXT: [[MUL_5:%.+]] = mul i32 [[DIV_4]], 20 |
620 | // CHECK-NEXT: [[SUB_8:%.+]] = sub i32 [[ADD_10]], [[MUL_5]] |
621 | // CHECK-NEXT: [[DIV_5:%.+]] = udiv i32 [[SUB_8]], 5 |
622 | // CHECK-NEXT: [[MUL_6:%.+]] = mul i32 [[DIV_5]], 5 |
623 | // CHECK-NEXT: [[SUB_9:%.+]] = sub i32 [[ADD_9]], [[MUL_6]] |
624 | // CHECK-NEXT: [[MUL_6:%.+]] = mul i32 [[SUB_9]], 1 |
625 | // CHECK-NEXT: [[CALC_L_2:%.+]] = add i32 4, [[MUL_6]] |
626 | // CHECK-NEXT: [[CALC_L_3:%.+]] = trunc i32 [[CALC_L_2]] to i16 |
627 | // CHECK-NEXT: store i16 [[CALC_L_3]], i16* [[LC_L:.+]] |
628 | // ... loop body ... |
629 | // End of body: store into a[i]: |
630 | // CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]] |
631 | float res = b[j] * c[k]; |
632 | a[i] = res * d[l]; |
633 | // CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]] |
634 | // CHECK-NEXT: [[ADD2:%.+]] = add i32 [[IV2]], 1 |
635 | // CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]] |
636 | // br label %{{[^,]+}}, !llvm.loop ![[COLL1_LOOP_ID]] |
637 | // CHECK: [[COLL1_END]]: |
638 | } |
639 | // i,j,l are updated; k is not updated. |
640 | // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
641 | // CHECK: br i1 |
642 | // CHECK: store i32 3, i32* |
643 | // CHECK-NEXT: store i32 5, |
644 | // CHECK-NEXT: store i32 7, |
645 | // CHECK-NEXT: store i16 9, i16* |
646 | // CHECK: call void @__kmpc_barrier(%struct.ident_t* {{.+}}, i32 %{{.+}}) |
647 | // CHECK: ret void |
648 | } |
649 | |
650 | extern char foo(); |
651 | extern double globalfloat; |
652 | |
653 | // CHECK-LABEL: define {{.*void}} @{{.*}}widened{{.*}} |
654 | void widened(float *a, float *b, float *c, float *d) { |
655 | int i; // outer loop counter |
656 | short j; // inner loop counter |
657 | globalfloat = 1.0; |
658 | int localint = 1; |
659 | // CHECK: store double {{.+}}, double* [[GLOBALFLOAT:@.+]] |
660 | // The collapsed counter is widened to 64 bits. |
661 | // CHECK: [[MUL:%.+]] = mul nsw i64 2, %{{.+}} |
662 | // CHECK-NEXT: [[SUB:%.+]] = sub nsw i64 [[MUL]], 1 |
663 | // CHECK-NEXT: store i64 [[SUB]], i64* [[OMP_LAST_IT:%[^,]+]], |
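// The collapsed trip count is 2 * (inner iteration count), computed in 64 bits;
// OMP_LAST_IT holds that product minus 1.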
664 | // CHECK: call void @__kmpc_for_static_init_8(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1) |
665 | // CHECK-DAG: [[UB_VAL:%.+]] = load i64, i64* [[UB]], |
666 | // CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]], |
667 | // CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], [[OMP_LAST_IT_VAL]] |
668 | // CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]] |
669 | // CHECK: [[TRUE]]: |
670 | // CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]], |
671 | // CHECK: br label %[[SWITCH:[^,]+]] |
672 | // CHECK: [[FALSE]]: |
673 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]], |
674 | // CHECK: br label %[[SWITCH]] |
675 | // CHECK: [[SWITCH]]: |
676 | // CHECK: [[UP:%.+]] = phi i64 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ] |
677 | // CHECK: store i64 [[UP]], i64* [[UB]], |
678 | // CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]], |
679 | // CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV:%[^,]+]], |
680 | // |
681 | #pragma omp for simd collapse(2) private(globalfloat, localint) |
682 | |
683 | // CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]] |
684 | // CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]] |
685 | // CHECK-NEXT: [[CMP:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]] |
686 | // CHECK-NEXT: br i1 [[CMP]], label %[[WIDE1_BODY:[^,]+]], label %[[WIDE1_END:[^,]+]] |
687 | for (i = 1; i < 3; i++) // 2 iterations |
688 | for (j = 0; j < foo(); j++) // foo() iterations |
689 | { |
690 | // CHECK: [[WIDE1_BODY]]: |
691 | // Start of body: calculate i from index: |
692 | // CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]] |
693 | // Calculation of the loop counter values... |
694 | // CHECK: store i32 {{[^,]+}}, i32* [[LC_I:.+]] |
695 | // CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]] |
696 | // CHECK: store i16 {{[^,]+}}, i16* [[LC_J:.+]] |
697 | // ... loop body ... |
698 | // |
699 | // Here we expect a store into the private double var, not the global one. |
700 | // CHECK-NOT: store double {{.+}}, double* [[GLOBALFLOAT]] |
701 | globalfloat = (float)j/i; |
702 | float res = b[j] * c[j]; |
703 | // Store into a[i]: |
704 | // CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]] |
705 | a[i] = res * d[i]; |
706 | // Then there's a store into private var localint: |
707 | // CHECK: store i32 {{.+}}, i32* [[LOCALINT:%[^,]+]] |
708 | localint = (int)j; |
709 | // CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]] |
710 | // CHECK-NEXT: [[ADD2:%.+]] = add nsw i64 [[IV2]], 1 |
711 | // CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]] |
712 | // |
713 | // br label %{{[^,]+}}, !llvm.loop ![[WIDE1_LOOP_ID]] |
714 | // CHECK: [[WIDE1_END]]: |
715 | } |
716 | // i,j are updated. |
717 | // CHECK: store i32 3, i32* [[I:%[^,]+]] |
718 | // CHECK: store i16 |
719 | // |
720 | // Here we expect a store into the original localint, not its privatized version. |
721 | // CHECK-NOT: store i32 {{.+}}, i32* [[LOCALINT]] |
722 | localint = (int)j; |
723 | // CHECK: ret void |
724 | } |
725 | |
726 | // TERM_DEBUG-LABEL: bar |
727 | int bar() {return 0;}; |
728 | |
729 | // TERM_DEBUG-LABEL: parallel_simd |
730 | void parallel_simd(float *a) { |
731 | #pragma omp parallel |
732 | #pragma omp for simd |
733 | // TERM_DEBUG-NOT: __kmpc_global_thread_num |
734 | // TERM_DEBUG: invoke i32 {{.*}}bar{{.*}}() |
735 | // TERM_DEBUG: unwind label %[[TERM_LPAD:.+]], |
736 | // TERM_DEBUG-NOT: __kmpc_global_thread_num |
737 | // TERM_DEBUG: [[TERM_LPAD]] |
738 | // TERM_DEBUG: call void @__clang_call_terminate |
739 | // TERM_DEBUG: unreachable |
740 | for (unsigned i = 131071; i <= 2147483647; i += 127) |
741 | a[i] += bar(); |
742 | } |
743 | // TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]], |
744 | // TERM_DEBUG-NOT: line: 0, |
745 | #endif // HEADER |
746 | |