1 | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s |
2 | // RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s |
3 | // RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s |
4 | |
5 | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s |
6 | // RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s |
7 | // RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s |
8 | // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} |
9 | // expected-no-diagnostics |
10 | #ifndef HEADER |
11 | #define HEADER |
12 | |
13 | _Bool bv, bx; |
14 | char cv, cx; |
15 | unsigned char ucv, ucx; |
16 | short sv, sx; |
17 | unsigned short usv, usx; |
18 | int iv, ix; |
19 | unsigned int uiv, uix; |
20 | long lv, lx; |
21 | unsigned long ulv, ulx; |
22 | long long llv, llx; |
23 | unsigned long long ullv, ullx; |
24 | float fv, fx; |
25 | double dv, dx; |
26 | long double ldv, ldx; |
27 | _Complex int civ, cix; |
28 | _Complex float cfv, cfx; |
29 | _Complex double cdv, cdx; |
30 | |
31 | typedef int int4 __attribute__((__vector_size__(16))); |
32 | int4 int4x; |
33 | |
34 | struct BitFields { |
35 | int : 32; |
36 | int a : 31; |
37 | } bfx; |
38 | |
39 | struct BitFields_packed { |
40 | int : 32; |
41 | int a : 31; |
42 | } __attribute__ ((__packed__)) bfx_packed; |
43 | |
44 | struct BitFields2 { |
45 | int : 31; |
46 | int a : 1; |
47 | } bfx2; |
48 | |
49 | struct BitFields2_packed { |
50 | int : 31; |
51 | int a : 1; |
52 | } __attribute__ ((__packed__)) bfx2_packed; |
53 | |
54 | struct BitFields3 { |
55 | int : 11; |
56 | int a : 14; |
57 | } bfx3; |
58 | |
59 | struct BitFields3_packed { |
60 | int : 11; |
61 | int a : 14; |
62 | } __attribute__ ((__packed__)) bfx3_packed; |
63 | |
64 | struct BitFields4 { |
65 | short : 16; |
66 | int a: 1; |
67 | long b : 7; |
68 | } bfx4; |
69 | |
70 | struct BitFields4_packed { |
71 | short : 16; |
72 | int a: 1; |
73 | long b : 7; |
74 | } __attribute__ ((__packed__)) bfx4_packed; |
75 | |
76 | typedef float float2 __attribute__((ext_vector_type(2))); |
77 | float2 float2x; |
78 | |
79 | // Register "0" is currently an invalid register for global register variables. |
80 | // Use "esp" instead of "0". |
81 | // register int rix __asm__("0"); |
82 | register int rix __asm__("esp"); |
83 | |
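// The captures below exercise '#pragma omp atomic capture' over scalar,
// floating-point, _Complex, vector-element, bit-field and global-register
// l-values, in both the "v = x binop= expr" and "{update; capture}" forms.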
84 | int main() { |
85 | // CHECK: [[PREV:%.+]] = atomicrmw add i8* @{{.+}}, i8 1 monotonic |
86 | // CHECK: store i8 [[PREV]], i8* @{{.+}}, |
87 | #pragma omp atomic capture |
88 | bv = bx++; |
89 | // CHECK: atomicrmw add i8* @{{.+}}, i8 1 monotonic |
90 | // CHECK: add nsw i32 %{{.+}}, 1 |
91 | // CHECK: store i8 %{{.+}}, i8* @{{.+}}, |
92 | #pragma omp atomic capture |
93 | cv = ++cx; |
94 | // CHECK: [[PREV:%.+]] = atomicrmw sub i8* @{{.+}}, i8 1 monotonic |
95 | // CHECK: store i8 [[PREV]], i8* @{{.+}}, |
96 | #pragma omp atomic capture |
97 | ucv = ucx--; |
98 | // CHECK: atomicrmw sub i16* @{{.+}}, i16 1 monotonic |
99 | // CHECK: sub nsw i32 %{{.+}}, 1 |
100 | // CHECK: store i16 %{{.+}}, i16* @{{.+}}, |
101 | #pragma omp atomic capture |
102 | sv = --sx; |
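// Pre/post increment and decrement on operands of a native atomic width are
// expected to lower to a single atomicrmw; the mixed-width update below
// (unsigned short target, int arithmetic) needs a cmpxchg retry loop instead.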
103 | // CHECK: [[USV:%.+]] = load i16, i16* @{{.+}}, |
104 | // CHECK: [[EXPR:%.+]] = zext i16 [[USV]] to i32 |
105 | // CHECK: [[X:%.+]] = load atomic i16, i16* [[X_ADDR:@.+]] monotonic |
106 | // CHECK: br label %[[CONT:.+]] |
107 | // CHECK: [[CONT]] |
108 | // CHECK: [[EXPECTED:%.+]] = phi i16 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
109 | // CHECK: [[CONV:%.+]] = zext i16 [[EXPECTED]] to i32 |
110 | // CHECK: [[ADD:%.+]] = add nsw i32 [[CONV]], [[EXPR]] |
111 | // CHECK: [[DESIRED_CALC:%.+]] = trunc i32 [[ADD]] to i16 |
112 | // CHECK: store i16 [[DESIRED_CALC]], i16* [[TEMP:%.+]], |
113 | // CHECK: [[DESIRED:%.+]] = load i16, i16* [[TEMP]], |
114 | // CHECK: [[RES:%.+]] = cmpxchg i16* [[X_ADDR]], i16 [[EXPECTED]], i16 [[DESIRED]] monotonic monotonic |
115 | // CHECK: [[OLD_X]] = extractvalue { i16, i1 } [[RES]], 0 |
116 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i16, i1 } [[RES]], 1 |
117 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
118 | // CHECK: [[EXIT]] |
119 | // CHECK: store i16 [[DESIRED_CALC]], i16* @{{.+}}, |
120 | #pragma omp atomic capture |
121 | sv = usx += usv; |
122 | // CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}}, |
123 | // CHECK: [[X:%.+]] = load atomic i32, i32* [[X_ADDR:@.+]] monotonic |
124 | // CHECK: br label %[[CONT:.+]] |
125 | // CHECK: [[CONT]] |
126 | // CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
127 | // CHECK: [[DESIRED_CALC:%.+]] = mul nsw i32 [[EXPECTED]], [[EXPR]] |
128 | // CHECK: store i32 [[DESIRED_CALC]], i32* [[TEMP:%.+]], |
129 | // CHECK: [[DESIRED:%.+]] = load i32, i32* [[TEMP]], |
130 | // CHECK: [[RES:%.+]] = cmpxchg i32* [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic |
131 | // CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0 |
132 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1 |
133 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
134 | // CHECK: [[EXIT]] |
135 | // CHECK: store i32 [[DESIRED_CALC]], i32* @{{.+}}, |
136 | #pragma omp atomic capture |
137 | uiv = ix *= iv; |
138 | // CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}}, |
139 | // CHECK: [[PREV:%.+]] = atomicrmw sub i32* @{{.+}}, i32 [[EXPR]] monotonic |
140 | // CHECK: store i32 [[PREV]], i32* @{{.+}}, |
141 | #pragma omp atomic capture |
142 | {iv = uix; uix -= uiv;} |
143 | // CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}}, |
144 | // CHECK: [[X:%.+]] = load atomic i32, i32* [[X_ADDR:@.+]] monotonic |
145 | // CHECK: br label %[[CONT:.+]] |
146 | // CHECK: [[CONT]] |
147 | // CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
148 | // CHECK: [[DESIRED_CALC:%.+]] = shl i32 [[EXPECTED]], [[EXPR]] |
149 | // CHECK: store i32 [[DESIRED_CALC]], i32* [[TEMP:%.+]], |
150 | // CHECK: [[DESIRED:%.+]] = load i32, i32* [[TEMP]], |
151 | // CHECK: [[RES:%.+]] = cmpxchg i32* [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic |
152 | // CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0 |
153 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1 |
154 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
155 | // CHECK: [[EXIT]] |
156 | // CHECK: store i32 [[DESIRED_CALC]], i32* @{{.+}}, |
157 | #pragma omp atomic capture |
158 | {ix <<= iv; uiv = ix;} |
159 | // CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}}, |
160 | // CHECK: [[X:%.+]] = load atomic i32, i32* [[X_ADDR:@.+]] monotonic |
161 | // CHECK: br label %[[CONT:.+]] |
162 | // CHECK: [[CONT]] |
163 | // CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
164 | // CHECK: [[DESIRED_CALC:%.+]] = lshr i32 [[EXPECTED]], [[EXPR]] |
165 | // CHECK: store i32 [[DESIRED_CALC]], i32* [[TEMP:%.+]], |
166 | // CHECK: [[DESIRED:%.+]] = load i32, i32* [[TEMP]], |
167 | // CHECK: [[RES:%.+]] = cmpxchg i32* [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic |
168 | // CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0 |
169 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1 |
170 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
171 | // CHECK: [[EXIT]] |
172 | // CHECK: store i32 [[DESIRED_CALC]], i32* @{{.+}}, |
173 | #pragma omp atomic capture |
174 | iv = uix >>= uiv; |
175 | // CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}}, |
176 | // CHECK: [[X:%.+]] = load atomic i64, i64* [[X_ADDR:@.+]] monotonic |
177 | // CHECK: br label %[[CONT:.+]] |
178 | // CHECK: [[CONT]] |
179 | // CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
180 | // CHECK: [[DESIRED:%.+]] = sdiv i64 [[EXPECTED]], [[EXPR]] |
181 | // CHECK: store i64 [[DESIRED]], i64* [[TEMP:%.+]], |
182 | // CHECK: [[DESIRED:%.+]] = load i64, i64* [[TEMP]], |
183 | // CHECK: [[RES:%.+]] = cmpxchg i64* [[X_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic |
184 | // CHECK: [[OLD_X]] = extractvalue { i64, i1 } [[RES]], 0 |
185 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1 |
186 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
187 | // CHECK: [[EXIT]] |
188 | // CHECK: store i64 [[EXPECTED]], i64* @{{.+}}, |
189 | #pragma omp atomic capture |
190 | {ulv = lx; lx /= lv;} |
191 | // CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}}, |
192 | // CHECK: [[OLD:%.+]] = atomicrmw and i64* @{{.+}}, i64 [[EXPR]] monotonic |
193 | // CHECK: [[DESIRED:%.+]] = and i64 [[OLD]], [[EXPR]] |
194 | // CHECK: store i64 [[DESIRED]], i64* @{{.+}}, |
195 | #pragma omp atomic capture |
196 | {ulx &= ulv; lv = ulx;} |
197 | // CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}}, |
198 | // CHECK: [[OLD:%.+]] = atomicrmw xor i64* @{{.+}}, i64 [[EXPR]] monotonic |
199 | // CHECK: [[DESIRED:%.+]] = xor i64 [[OLD]], [[EXPR]] |
200 | // CHECK: store i64 [[DESIRED]], i64* @{{.+}}, |
201 | #pragma omp atomic capture |
202 | ullv = llx ^= llv; |
203 | // CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}}, |
204 | // CHECK: [[OLD:%.+]] = atomicrmw or i64* @{{.+}}, i64 [[EXPR]] monotonic |
205 | // CHECK: [[DESIRED:%.+]] = or i64 [[OLD]], [[EXPR]] |
206 | // CHECK: store i64 [[DESIRED]], i64* @{{.+}}, |
207 | #pragma omp atomic capture |
208 | llv = ullx |= ullv; |
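// Bitwise &=, ^= and |= captures map to atomicrmw and/xor/or; the captured
// value is recomputed from the returned old value.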
209 | // CHECK: [[EXPR:%.+]] = load float, float* @{{.+}}, |
210 | // CHECK: [[X:%.+]] = load atomic i32, i32* bitcast (float* [[X_ADDR:@.+]] to i32*) monotonic |
211 | // CHECK: br label %[[CONT:.+]] |
212 | // CHECK: [[CONT]] |
213 | // CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
214 | // CHECK: [[TEMP_I:%.+]] = bitcast float* [[TEMP:%.+]] to i32* |
215 | // CHECK: [[OLD:%.+]] = bitcast i32 [[EXPECTED]] to float |
216 | // CHECK: [[ADD:%.+]] = fadd float [[OLD]], [[EXPR]] |
217 | // CHECK: store float [[ADD]], float* [[TEMP]], |
218 | // CHECK: [[DESIRED:%.+]] = load i32, i32* [[TEMP_I]], |
219 | // CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (float* [[X_ADDR]] to i32*), i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic |
220 | // CHECK: [[OLD_X:%.+]] = extractvalue { i32, i1 } [[RES]], 0 |
221 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1 |
222 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
223 | // CHECK: [[EXIT]] |
224 | // CHECK: [[CAST:%.+]] = fpext float [[ADD]] to double |
225 | // CHECK: store double [[CAST]], double* @{{.+}}, |
226 | #pragma omp atomic capture |
227 | dv = fx = fx + fv; |
228 | // CHECK: [[EXPR:%.+]] = load double, double* @{{.+}}, |
229 | // CHECK: [[X:%.+]] = load atomic i64, i64* bitcast (double* [[X_ADDR:@.+]] to i64*) monotonic |
230 | // CHECK: br label %[[CONT:.+]] |
231 | // CHECK: [[CONT]] |
232 | // CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
233 | // CHECK: [[TEMP_I:%.+]] = bitcast double* [[TEMP:%.+]] to i64* |
234 | // CHECK: [[OLD:%.+]] = bitcast i64 [[EXPECTED]] to double |
235 | // CHECK: [[SUB:%.+]] = fsub double [[EXPR]], [[OLD]] |
236 | // CHECK: store double [[SUB]], double* [[TEMP]], |
237 | // CHECK: [[DESIRED:%.+]] = load i64, i64* [[TEMP_I]], |
238 | // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (double* [[X_ADDR]] to i64*), i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic |
239 | // CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0 |
240 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1 |
241 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
242 | // CHECK: [[EXIT]] |
243 | // CHECK: [[CAST:%.+]] = fptrunc double [[OLD]] to float |
244 | // CHECK: store float [[CAST]], float* @{{.+}}, |
245 | #pragma omp atomic capture |
246 | {fv = dx; dx = dv - dx;} |
247 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}, |
248 | // CHECK: [[X:%.+]] = load atomic i128, i128* bitcast (x86_fp80* [[X_ADDR:@.+]] to i128*) monotonic |
249 | // CHECK: br label %[[CONT:.+]] |
250 | // CHECK: [[CONT]] |
251 | // CHECK: [[EXPECTED:%.+]] = phi i128 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
252 | // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i128* |
253 | // CHECK: store i128 [[EXPECTED]], i128* [[BITCAST]] |
254 | // CHECK: [[BITCAST1:%.+]] = bitcast x86_fp80* [[TEMP1:%.+]] to i128* |
255 | // CHECK: store i128 [[EXPECTED]], i128* [[BITCAST1]] |
256 | // CHECK: [[OLD:%.+]] = load x86_fp80, x86_fp80* [[TEMP1]] |
257 | // CHECK: [[MUL:%.+]] = fmul x86_fp80 [[OLD]], [[EXPR]] |
258 | // CHECK: store x86_fp80 [[MUL]], x86_fp80* [[TEMP]] |
259 | // CHECK: [[DESIRED:%.+]] = load i128, i128* [[BITCAST]] |
260 | // CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (x86_fp80* [[X_ADDR]] to i128*), i128 [[EXPECTED]], i128 [[DESIRED]] monotonic monotonic |
261 | // CHECK: [[OLD_X:%.+]] = extractvalue { i128, i1 } [[RES]], 0 |
262 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i128, i1 } [[RES]], 1 |
263 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
264 | // CHECK: [[EXIT]] |
265 | // CHECK: [[CAST:%.+]] = fptrunc x86_fp80 [[MUL]] to double |
266 | // CHECK: store double [[CAST]], double* @{{.+}}, |
267 | #pragma omp atomic capture |
268 | {ldx = ldx * ldv; dv = ldx;} |
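// Floating-point updates are expected to go through a same-width integer
// cmpxchg: i32 for float, i64 for double and i128 for x86_fp80.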
269 | // CHECK: [[EXPR_RE:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0) |
270 | // CHECK: [[EXPR_IM:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1) |
271 | // CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR:%.+]] to i8* |
272 | // CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0) |
273 | // CHECK: br label %[[CONT:.+]] |
274 | // CHECK: [[CONT]] |
275 | // CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 0 |
276 | // CHECK: [[LD_RE:%.+]] = load i32, i32* [[LD_RE_ADDR]] |
277 | // CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 1 |
278 | // CHECK: [[LD_IM:%.+]] = load i32, i32* [[LD_IM_ADDR]] |
279 | // <Skip checks for complex calculations> |
280 | // CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR:%.+]], i32 0, i32 0 |
281 | // CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR]], i32 0, i32 1 |
282 | // CHECK: store i32 [[NEW_RE:%.+]], i32* [[X_RE_ADDR]] |
283 | // CHECK: store i32 [[NEW_IM:%.+]], i32* [[X_IM_ADDR]] |
284 | // CHECK: [[EXPECTED:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR]] to i8* |
285 | // CHECK: [[DESIRED:%.+]] = bitcast { i32, i32 }* [[DESIRED_ADDR]] to i8* |
286 | // CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0) |
287 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
288 | // CHECK: [[EXIT]] |
289 | // CHECK: [[RE_CAST:%.+]] = sitofp i32 [[NEW_RE]] to float |
290 | // CHECK: [[IM_CAST:%.+]] = sitofp i32 [[NEW_IM]] to float |
291 | // CHECK: store float [[RE_CAST]], float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 0), |
292 | // CHECK: store float [[IM_CAST]], float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 1), |
293 | #pragma omp atomic capture |
294 | cfv = cix = civ / cix; |
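// _Complex captures go through the __atomic_load/__atomic_compare_exchange
// libcalls instead of a native cmpxchg.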
295 | // CHECK: [[EXPR_RE:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 0) |
296 | // CHECK: [[EXPR_IM:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 1) |
297 | // CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[EXPECTED_ADDR:%.+]] to i8* |
298 | // CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ float, float }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0) |
299 | // CHECK: br label %[[CONT:.+]] |
300 | // CHECK: [[CONT]] |
301 | // CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[EXPECTED_ADDR]], i32 0, i32 0 |
302 | // CHECK: [[X_RE_OLD:%.+]] = load float, float* [[X_RE_ADDR]] |
303 | // CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[EXPECTED_ADDR]], i32 0, i32 1 |
304 | // CHECK: [[X_IM_OLD:%.+]] = load float, float* [[X_IM_ADDR]] |
305 | // <Skip checks for complex calculations> |
306 | // CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[DESIRED_ADDR:%.+]], i32 0, i32 0 |
307 | // CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[DESIRED_ADDR]], i32 0, i32 1 |
308 | // CHECK: store float [[NEW_RE:%.+]], float* [[X_RE_ADDR]] |
309 | // CHECK: store float [[NEW_IM:%.+]], float* [[X_IM_ADDR]] |
310 | // CHECK: [[EXPECTED:%.+]] = bitcast { float, float }* [[EXPECTED_ADDR]] to i8* |
311 | // CHECK: [[DESIRED:%.+]] = bitcast { float, float }* [[DESIRED_ADDR]] to i8* |
312 | // CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ float, float }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0) |
313 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
314 | // CHECK: [[EXIT]] |
315 | // CHECK: [[RE_CAST:%.+]] = fptosi float [[X_RE_OLD]] to i32 |
316 | // CHECK: [[IM_CAST:%.+]] = fptosi float [[X_IM_OLD]] to i32 |
317 | // CHECK: store i32 [[RE_CAST]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0), |
318 | // CHECK: store i32 [[IM_CAST]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1), |
319 | #pragma omp atomic capture |
320 | {civ = cfx; cfx = cfv + cfx;} |
321 | // CHECK: [[EXPR_RE:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0) |
322 | // CHECK: [[EXPR_IM:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 1) |
323 | // CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[EXPECTED_ADDR:%.+]] to i8* |
324 | // CHECK: call void @__atomic_load(i64 16, i8* bitcast ({ double, double }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 5) |
325 | // CHECK: br label %[[CONT:.+]] |
326 | // CHECK: [[CONT]] |
327 | // CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[EXPECTED_ADDR]], i32 0, i32 0 |
328 | // CHECK: [[X_RE:%.+]] = load double, double* [[X_RE_ADDR]] |
329 | // CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[EXPECTED_ADDR]], i32 0, i32 1 |
330 | // CHECK: [[X_IM:%.+]] = load double, double* [[X_IM_ADDR]] |
331 | // <Skip checks for complex calculations> |
332 | // CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[DESIRED_ADDR:%.+]], i32 0, i32 0 |
333 | // CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[DESIRED_ADDR]], i32 0, i32 1 |
334 | // CHECK: store double [[NEW_RE:%.+]], double* [[X_RE_ADDR]] |
335 | // CHECK: store double [[NEW_IM:%.+]], double* [[X_IM_ADDR]] |
336 | // CHECK: [[EXPECTED:%.+]] = bitcast { double, double }* [[EXPECTED_ADDR]] to i8* |
337 | // CHECK: [[DESIRED:%.+]] = bitcast { double, double }* [[DESIRED_ADDR]] to i8* |
338 | // CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* bitcast ({ double, double }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 5, i32 5) |
339 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
340 | // CHECK: [[EXIT]] |
341 | // CHECK: [[RE_CAST:%.+]] = fptrunc double [[NEW_RE]] to float |
342 | // CHECK: [[IM_CAST:%.+]] = fptrunc double [[NEW_IM]] to float |
343 | // CHECK: store float [[RE_CAST]], float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 0), |
344 | // CHECK: store float [[IM_CAST]], float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 1), |
345 | // CHECK: call{{.*}} @__kmpc_flush( |
346 | #pragma omp atomic capture seq_cst |
347 | {cdx = cdx - cdv; cfv = cdx;} |
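// With the seq_cst clause the libcall path uses memory order 5 (seq_cst) and a
// call to __kmpc_flush is emitted after the construct.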
348 | // CHECK: [[BV:%.+]] = load i8, i8* @{{.+}} |
349 | // CHECK: [[BOOL:%.+]] = trunc i8 [[BV]] to i1 |
350 | // CHECK: [[EXPR:%.+]] = zext i1 [[BOOL]] to i64 |
351 | // CHECK: [[OLD:%.+]] = atomicrmw and i64* @{{.+}}, i64 [[EXPR]] monotonic |
352 | // CHECK: [[DESIRED:%.+]] = and i64 [[OLD]], [[EXPR]] |
353 | // CHECK: store i64 [[DESIRED]], i64* @{{.+}}, |
354 | #pragma omp atomic capture |
355 | ulv = ulx = ulx & bv; |
356 | // CHECK: [[CV:%.+]] = load i8, i8* @{{.+}}, align 1 |
357 | // CHECK: [[EXPR:%.+]] = sext i8 [[CV]] to i32 |
358 | // CHECK: [[X:%.+]] = load atomic i8, i8* [[BX_ADDR:@.+]] monotonic |
359 | // CHECK: br label %[[CONT:.+]] |
360 | // CHECK: [[CONT]] |
361 | // CHECK: [[EXPECTED:%.+]] = phi i8 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
362 | // CHECK: [[OLD_BOOL:%.+]] = trunc i8 [[EXPECTED]] to i1 |
363 | // CHECK: [[X_RVAL:%.+]] = zext i1 [[OLD_BOOL]] to i32 |
364 | // CHECK: [[AND:%.+]] = and i32 [[EXPR]], [[X_RVAL]] |
365 | // CHECK: [[CAST:%.+]] = icmp ne i32 [[AND]], 0 |
366 | // CHECK: [[NEW:%.+]] = zext i1 [[CAST]] to i8 |
367 | // CHECK: store i8 [[NEW]], i8* [[TEMP:%.+]], |
368 | // CHECK: [[DESIRED:%.+]] = load i8, i8* [[TEMP]], |
369 | // CHECK: [[RES:%.+]] = cmpxchg i8* [[BX_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic |
370 | // CHECK: [[OLD:%.+]] = extractvalue { i8, i1 } [[RES]], 0 |
371 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1 |
372 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
373 | // CHECK: [[EXIT]] |
374 | // CHECK: [[OLD_I8:%.+]] = zext i1 [[OLD_BOOL]] to i8 |
375 | // CHECK: store i8 [[OLD_I8]], i8* @{{.+}}, |
376 | #pragma omp atomic capture |
377 | {bv = bx; bx = cv & bx;} |
378 | // CHECK: [[UCV:%.+]] = load i8, i8* @{{.+}}, |
379 | // CHECK: [[EXPR:%.+]] = zext i8 [[UCV]] to i32 |
380 | // CHECK: [[X:%.+]] = load atomic i8, i8* [[CX_ADDR:@.+]] seq_cst |
381 | // CHECK: br label %[[CONT:.+]] |
382 | // CHECK: [[CONT]] |
383 | // CHECK: [[EXPECTED:%.+]] = phi i8 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
384 | // CHECK: [[X_RVAL:%.+]] = sext i8 [[EXPECTED]] to i32 |
385 | // CHECK: [[ASHR:%.+]] = ashr i32 [[X_RVAL]], [[EXPR]] |
386 | // CHECK: [[NEW:%.+]] = trunc i32 [[ASHR]] to i8 |
387 | // CHECK: store i8 [[NEW]], i8* [[TEMP:%.+]], |
388 | // CHECK: [[DESIRED:%.+]] = load i8, i8* [[TEMP]], |
389 | // CHECK: [[RES:%.+]] = cmpxchg i8* [[CX_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] seq_cst seq_cst |
390 | // CHECK: [[OLD_X:%.+]] = extractvalue { i8, i1 } [[RES]], 0 |
391 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1 |
392 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
393 | // CHECK: [[EXIT]] |
394 | // CHECK: store i8 [[NEW]], i8* @{{.+}}, |
395 | // CHECK: call{{.*}} @__kmpc_flush( |
396 | #pragma omp atomic capture, seq_cst |
397 | {cx = cx >> ucv; cv = cx;} |
398 | // CHECK: [[SV:%.+]] = load i16, i16* @{{.+}}, |
399 | // CHECK: [[EXPR:%.+]] = sext i16 [[SV]] to i32 |
400 | // CHECK: [[X:%.+]] = load atomic i64, i64* [[ULX_ADDR:@.+]] monotonic |
401 | // CHECK: br label %[[CONT:.+]] |
402 | // CHECK: [[CONT]] |
403 | // CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
404 | // CHECK: [[X_RVAL:%.+]] = trunc i64 [[EXPECTED]] to i32 |
405 | // CHECK: [[SHL:%.+]] = shl i32 [[EXPR]], [[X_RVAL]] |
406 | // CHECK: [[NEW:%.+]] = sext i32 [[SHL]] to i64 |
407 | // CHECK: store i64 [[NEW]], i64* [[TEMP:%.+]], |
408 | // CHECK: [[DESIRED:%.+]] = load i64, i64* [[TEMP]], |
409 | // CHECK: [[RES:%.+]] = cmpxchg i64* [[ULX_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic |
410 | // CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0 |
411 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1 |
412 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
413 | // CHECK: [[EXIT]] |
414 | // CHECK: store i64 [[NEW]], i64* @{{.+}}, |
415 | #pragma omp atomic capture |
416 | ulv = ulx = sv << ulx; |
417 | // CHECK: [[USV:%.+]] = load i16, i16* @{{.+}}, |
418 | // CHECK: [[EXPR:%.+]] = zext i16 [[USV]] to i64 |
419 | // CHECK: [[X:%.+]] = load atomic i64, i64* [[LX_ADDR:@.+]] monotonic |
420 | // CHECK: br label %[[CONT:.+]] |
421 | // CHECK: [[CONT]] |
422 | // CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
423 | // CHECK: [[DESIRED:%.+]] = srem i64 [[EXPECTED]], [[EXPR]] |
424 | // CHECK: store i64 [[DESIRED]], i64* [[TEMP:%.+]], |
425 | // CHECK: [[DESIRED:%.+]] = load i64, i64* [[TEMP]], |
426 | // CHECK: [[RES:%.+]] = cmpxchg i64* [[LX_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic |
427 | // CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0 |
428 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1 |
429 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
430 | // CHECK: [[EXIT]] |
431 | // CHECK: store i64 [[EXPECTED]], i64* @{{.+}}, |
432 | #pragma omp atomic capture |
433 | {lv = lx; lx = lx % usv;} |
434 | // CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}} |
435 | // CHECK: [[OLD:%.+]] = atomicrmw or i32* @{{.+}}, i32 [[EXPR]] seq_cst |
436 | // CHECK: [[DESIRED:%.+]] = or i32 [[EXPR]], [[OLD]] |
437 | // CHECK: store i32 [[DESIRED]], i32* @{{.+}}, |
438 | // CHECK: call{{.*}} @__kmpc_flush( |
439 | #pragma omp atomic seq_cst, capture |
440 | {uix = iv | uix; uiv = uix;} |
441 | // CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}} |
442 | // CHECK: [[OLD:%.+]] = atomicrmw and i32* @{{.+}}, i32 [[EXPR]] monotonic |
443 | // CHECK: [[DESIRED:%.+]] = and i32 [[OLD]], [[EXPR]] |
444 | // CHECK: store i32 [[DESIRED]], i32* @{{.+}}, |
445 | #pragma omp atomic capture |
446 | iv = ix = ix & uiv; |
447 | // CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}}, |
448 | // CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR:%.+]] to i8* |
449 | // CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0) |
450 | // CHECK: br label %[[CONT:.+]] |
451 | // CHECK: [[CONT]] |
452 | // CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 0 |
453 | // CHECK: [[OLD_RE:%.+]] = load i32, i32* [[X_RE_ADDR]] |
454 | // CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 1 |
455 | // CHECK: [[OLD_IM:%.+]] = load i32, i32* [[X_IM_ADDR]] |
456 | // <Skip checks for complex calculations> |
457 | // CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR:%.+]], i32 0, i32 0 |
458 | // CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR]], i32 0, i32 1 |
459 | // CHECK: store i32 %{{.+}}, i32* [[X_RE_ADDR]] |
460 | // CHECK: store i32 %{{.+}}, i32* [[X_IM_ADDR]] |
461 | // CHECK: [[EXPECTED:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR]] to i8* |
462 | // CHECK: [[DESIRED:%.+]] = bitcast { i32, i32 }* [[DESIRED_ADDR]] to i8* |
463 | // CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0) |
464 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
465 | // CHECK: [[EXIT]] |
466 | // CHECK: store i32 [[OLD_RE]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0), |
467 | // CHECK: store i32 [[OLD_IM]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1), |
468 | #pragma omp atomic capture |
469 | {civ = cix; cix = lv + cix;} |
470 | // CHECK: [[ULV:%.+]] = load i64, i64* @{{.+}}, |
471 | // CHECK: [[EXPR:%.+]] = uitofp i64 [[ULV]] to float |
472 | // CHECK: [[X:%.+]] = load atomic i32, i32* bitcast (float* [[X_ADDR:@.+]] to i32*) monotonic |
473 | // CHECK: br label %[[CONT:.+]] |
474 | // CHECK: [[CONT]] |
475 | // CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
476 | // CHECK: [[TEMP_I:%.+]] = bitcast float* [[TEMP:%.+]] to i32* |
477 | // CHECK: [[OLD:%.+]] = bitcast i32 [[EXPECTED]] to float |
478 | // CHECK: [[MUL:%.+]] = fmul float [[OLD]], [[EXPR]] |
479 | // CHECK: store float [[MUL]], float* [[TEMP]], |
480 | // CHECK: [[DESIRED:%.+]] = load i32, i32* [[TEMP_I]], |
481 | // CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (float* [[X_ADDR]] to i32*), i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic |
482 | // CHECK: [[OLD_X:%.+]] = extractvalue { i32, i1 } [[RES]], 0 |
483 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1 |
484 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
485 | // CHECK: [[EXIT]] |
486 | // CHECK: store float [[MUL]], float* @{{.+}}, |
487 | #pragma omp atomic capture |
488 | {fx = fx * ulv; fv = fx;} |
489 | // CHECK: [[LLV:%.+]] = load i64, i64* @{{.+}}, |
490 | // CHECK: [[EXPR:%.+]] = sitofp i64 [[LLV]] to double |
491 | // CHECK: [[X:%.+]] = load atomic i64, i64* bitcast (double* [[X_ADDR:@.+]] to i64*) monotonic |
492 | // CHECK: br label %[[CONT:.+]] |
493 | // CHECK: [[CONT]] |
494 | // CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
495 | // CHECK: [[TEMP_I:%.+]] = bitcast double* [[TEMP:%.+]] to i64* |
496 | // CHECK: [[OLD:%.+]] = bitcast i64 [[EXPECTED]] to double |
497 | // CHECK: [[DIV:%.+]] = fdiv double [[OLD]], [[EXPR]] |
498 | // CHECK: store double [[DIV]], double* [[TEMP]], |
499 | // CHECK: [[DESIRED:%.+]] = load i64, i64* [[TEMP_I]], |
500 | // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (double* [[X_ADDR]] to i64*), i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic |
501 | // CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0 |
502 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1 |
503 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
504 | // CHECK: [[EXIT]] |
505 | // CHECK: store double [[DIV]], double* @{{.+}}, |
506 | #pragma omp atomic capture |
507 | dv = dx /= llv; |
508 | // CHECK: [[ULLV:%.+]] = load i64, i64* @{{.+}}, |
509 | // CHECK: [[EXPR:%.+]] = uitofp i64 [[ULLV]] to x86_fp80 |
510 | // CHECK: [[X:%.+]] = load atomic i128, i128* bitcast (x86_fp80* [[X_ADDR:@.+]] to i128*) monotonic |
511 | // CHECK: br label %[[CONT:.+]] |
512 | // CHECK: [[CONT]] |
513 | // CHECK: [[EXPECTED:%.+]] = phi i128 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
514 | // CHECK: [[TEMP_I1:%.+]] = bitcast x86_fp80* [[TEMP1:%.+]] to i128* |
515 | // CHECK: store i128 [[EXPECTED]], i128* [[TEMP_I1]], |
516 | // CHECK: [[TEMP_I:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i128* |
517 | // CHECK: store i128 [[EXPECTED]], i128* [[TEMP_I]], |
518 | // CHECK: [[OLD:%.+]] = load x86_fp80, x86_fp80* [[TEMP]], |
519 | // CHECK: [[SUB:%.+]] = fsub x86_fp80 [[OLD]], [[EXPR]] |
520 | // CHECK: store x86_fp80 [[SUB]], x86_fp80* [[TEMP1]] |
521 | // CHECK: [[DESIRED:%.+]] = load i128, i128* [[TEMP_I1]] |
522 | // CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (x86_fp80* [[X_ADDR]] to i128*), i128 [[EXPECTED]], i128 [[DESIRED]] monotonic monotonic |
523 | // CHECK: [[OLD_X:%.+]] = extractvalue { i128, i1 } [[RES]], 0 |
524 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i128, i1 } [[RES]], 1 |
525 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
526 | // CHECK: [[EXIT]] |
527 | // CHECK: store x86_fp80 [[OLD]], x86_fp80* @{{.+}}, |
528 | #pragma omp atomic capture |
529 | {ldv = ldx; ldx -= ullv;} |
530 | // CHECK: [[EXPR:%.+]] = load float, float* @{{.+}}, |
531 | // CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR:%.+]] to i8* |
532 | // CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0) |
533 | // CHECK: br label %[[CONT:.+]] |
534 | // CHECK: [[CONT]] |
535 | // CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 0 |
536 | // CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]] |
537 | // CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 1 |
538 | // CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]] |
539 | // <Skip checks for complex calculations> |
540 | // CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR:%.+]], i32 0, i32 0 |
541 | // CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR]], i32 0, i32 1 |
542 | // CHECK: store i32 [[NEW_RE:%.+]], i32* [[X_RE_ADDR]] |
543 | // CHECK: store i32 [[NEW_IM:%.+]], i32* [[X_IM_ADDR]] |
544 | // CHECK: [[EXPECTED:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR]] to i8* |
545 | // CHECK: [[DESIRED:%.+]] = bitcast { i32, i32 }* [[DESIRED_ADDR]] to i8* |
546 | // CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0) |
547 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
548 | // CHECK: [[EXIT]] |
549 | // CHECK: store i32 [[NEW_RE]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0), |
550 | // CHECK: store i32 [[NEW_IM]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1), |
551 | #pragma omp atomic capture |
552 | {cix = fv / cix; civ = cix;} |
553 | // CHECK: [[EXPR:%.+]] = load double, double* @{{.+}}, |
554 | // CHECK: [[X:%.+]] = load atomic i16, i16* [[X_ADDR:@.+]] monotonic |
555 | // CHECK: br label %[[CONT:.+]] |
556 | // CHECK: [[CONT]] |
557 | // CHECK: [[EXPECTED:%.+]] = phi i16 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
558 | // CHECK: [[CONV:%.+]] = sext i16 [[EXPECTED]] to i32 |
559 | // CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CONV]] to double |
560 | // CHECK: [[ADD:%.+]] = fadd double [[X_RVAL]], [[EXPR]] |
561 | // CHECK: [[NEW:%.+]] = fptosi double [[ADD]] to i16 |
562 | // CHECK: store i16 [[NEW]], i16* [[TEMP:%.+]], |
563 | // CHECK: [[DESIRED:%.+]] = load i16, i16* [[TEMP]], |
564 | // CHECK: [[RES:%.+]] = cmpxchg i16* [[X_ADDR]], i16 [[EXPECTED]], i16 [[DESIRED]] monotonic monotonic |
565 | // CHECK: [[OLD_X]] = extractvalue { i16, i1 } [[RES]], 0 |
566 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i16, i1 } [[RES]], 1 |
567 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
568 | // CHECK: [[EXIT]] |
569 | // CHECK: store i16 [[NEW]], i16* @{{.+}}, |
570 | #pragma omp atomic capture |
571 | sv = sx = sx + dv; |
572 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}, |
573 | // CHECK: [[XI8:%.+]] = load atomic i8, i8* [[X_ADDR:@.+]] monotonic |
574 | // CHECK: br label %[[CONT:.+]] |
575 | // CHECK: [[CONT]] |
576 | // CHECK: [[EXPECTED:%.+]] = phi i8 [ [[XI8]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
577 | // CHECK: [[BOOL_EXPECTED:%.+]] = trunc i8 [[EXPECTED]] to i1 |
578 | // CHECK: [[CONV:%.+]] = zext i1 [[BOOL_EXPECTED]] to i32 |
579 | // CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CONV]] to x86_fp80 |
580 | // CHECK: [[MUL:%.+]] = fmul x86_fp80 [[EXPR]], [[X_RVAL]] |
581 | // CHECK: [[BOOL_DESIRED:%.+]] = fcmp une x86_fp80 [[MUL]], 0xK00000000000000000000 |
582 | // CHECK: [[DESIRED:%.+]] = zext i1 [[BOOL_DESIRED]] to i8 |
583 | // CHECK: store i8 [[DESIRED]], i8* [[TEMP:%.+]], |
584 | // CHECK: [[DESIRED:%.+]] = load i8, i8* [[TEMP]], |
585 | // CHECK: [[RES:%.+]] = cmpxchg i8* [[X_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic |
586 | // CHECK: [[OLD_X:%.+]] = extractvalue { i8, i1 } [[RES]], 0 |
587 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1 |
588 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
589 | // CHECK: [[EXIT]] |
590 | // CHECK: [[EXPECTED_I8:%.+]] = zext i1 [[BOOL_EXPECTED]] to i8 |
591 | // CHECK: store i8 [[EXPECTED_I8]], i8* @{{.+}}, |
592 | #pragma omp atomic capture |
593 | {bv = bx; bx = ldv * bx;} |
594 | // CHECK: [[EXPR_RE:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* [[CIV_ADDR:@.+]], i32 0, i32 0), |
595 | // CHECK: [[EXPR_IM:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* [[CIV_ADDR]], i32 0, i32 1), |
596 | // CHECK: [[XI8:%.+]] = load atomic i8, i8* [[X_ADDR:@.+]] monotonic |
597 | // CHECK: br label %[[CONT:.+]] |
598 | // CHECK: [[CONT]] |
599 | // CHECK: [[EXPECTED:%.+]] = phi i8 [ [[XI8]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ] |
600 | // CHECK: [[BOOL_EXPECTED:%.+]] = trunc i8 [[EXPECTED]] to i1 |
601 | // CHECK: [[X_RVAL:%.+]] = zext i1 [[BOOL_EXPECTED]] to i32 |
602 | // CHECK: [[SUB_RE:%.+]] = sub i32 [[EXPR_RE:%.+]], [[X_RVAL]] |
603 | // CHECK: [[SUB_IM:%.+]] = sub i32 [[EXPR_IM:%.+]], 0 |
604 | // CHECK: icmp ne i32 [[SUB_RE]], 0 |
605 | // CHECK: icmp ne i32 [[SUB_IM]], 0 |
606 | // CHECK: [[BOOL_DESIRED:%.+]] = or i1 |
607 | // CHECK: [[DESIRED:%.+]] = zext i1 [[BOOL_DESIRED]] to i8 |
608 | // CHECK: store i8 [[DESIRED]], i8* [[TEMP:%.+]], |
609 | // CHECK: [[DESIRED:%.+]] = load i8, i8* [[TEMP]], |
610 | // CHECK: [[RES:%.+]] = cmpxchg i8* [[X_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic |
611 | // CHECK: [[OLD_X:%.+]] = extractvalue { i8, i1 } [[RES]], 0 |
612 | // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1 |
613 | // CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]] |
614 | // CHECK: [[EXIT]] |
615 | // CHECK: [[DESIRED_I8:%.+]] = zext i1 [[BOOL_DESIRED]] to i8 |
616 | // CHECK: store i8 [[DESIRED_I8]], i8* @{{.+}}, |
617 | #pragma omp atomic capture |
618 | {bx = civ - bx; bv = bx;} |
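// Updating a single vector element is expected to exchange the whole 16-byte
// vector through an i128 cmpxchg, using extractelement/insertelement on a
// temporary copy.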
619 | // CHECK: [[IDX:%.+]] = load i16, i16* @{{.+}} |
620 | // CHECK: load i8, i8* |
621 | // CHECK: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32 |
622 | // CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic |
623 | // CHECK: br label %[[CONT:.+]] |
624 | // CHECK: [[CONT]] |
625 | // CHECK: [[OLD_I128:%.+]] = phi i128 [ [[I128VAL]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] |
626 | // CHECK: [[TEMP_I:%.+]] = bitcast <4 x i32>* [[TEMP:%.+]] to i128* |
627 | // CHECK: store i128 [[OLD_I128]], i128* [[TEMP_I]], |
628 | // CHECK: [[LD:%.+]] = bitcast i128 [[OLD_I128]] to <4 x i32> |
629 | // CHECK: store <4 x i32> [[LD]], <4 x i32>* [[TEMP1:%.+]], |
630 | // CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[TEMP1]] |
631 | // CHECK: [[ITEM:%.+]] = extractelement <4 x i32> [[VEC_VAL]], i16 [[IDX]] |
632 | // CHECK: [[OR:%.+]] = or i32 [[ITEM]], [[VEC_ITEM_VAL]] |
633 | // CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[TEMP]] |
634 | // CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[OR]], i16 [[IDX]] |
635 | // CHECK: store <4 x i32> [[NEW_VEC_VAL]], <4 x i32>* [[TEMP]] |
636 | // CHECK: [[NEW_I128:%.+]] = load i128, i128* [[TEMP_I]], |
637 | // CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (<4 x i32>* [[DEST]] to i128*), i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic |
638 | // CHECK: [[FAILED_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0 |
639 | // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1 |
640 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
641 | // CHECK: [[EXIT]] |
642 | // CHECK: store i32 [[OR]], i32* @{{.+}}, |
643 | #pragma omp atomic capture |
644 | {int4x[sv] |= bv; iv = int4x[sv];} |
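// The bit-field captures below load and exchange the addressable container,
// masking the field value in and out; the packed variants go through a
// byte-sized cmpxchg or the __atomic_* libcalls.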
645 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}} |
646 | // CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic |
647 | // CHECK: br label %[[CONT:.+]] |
648 | // CHECK: [[CONT]] |
649 | // CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] |
650 | // CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP1:%.+]], |
651 | // CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]], |
652 | // CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]], |
653 | // CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 1 |
654 | // CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 1 |
655 | // CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80 |
656 | // CHECK: [[SUB:%.+]] = fsub x86_fp80 [[X_RVAL]], [[EXPR]] |
657 | // CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB]] to i32 |
658 | // CHECK: [[NEW_VAL:%.+]] = load i32, i32* [[TEMP1]], |
659 | // CHECK: [[BF_VALUE:%.+]] = and i32 [[CONV]], 2147483647 |
660 | // CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], -2147483648 |
661 | // CHECK: [[BF_SET:%.+]] = or i32 [[BF_CLEAR]], [[BF_VALUE]] |
662 | // CHECK: store i32 [[BF_SET]], i32* [[TEMP1]], |
663 | // CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[TEMP1]], |
664 | // CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic |
665 | // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0 |
666 | // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1 |
667 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
668 | // CHECK: [[EXIT]] |
669 | // CHECK: store i32 [[CONV]], i32* @{{.+}}, |
670 | #pragma omp atomic capture |
671 | iv = bfx.a = bfx.a - ldv; |
672 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}} |
673 | // CHECK: [[BITCAST:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8* |
674 | // CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST]], i32 0) |
675 | // CHECK: br label %[[CONT:.+]] |
676 | // CHECK: [[CONT]] |
677 | // CHECK: [[OLD:%.+]] = load i32, i32* [[LDTEMP]], |
678 | // CHECK: store i32 [[OLD]], i32* [[TEMP1:%.+]], |
679 | // CHECK: [[OLD:%.+]] = load i32, i32* [[LDTEMP]], |
680 | // CHECK: store i32 [[OLD]], i32* [[TEMP:%.+]], |
681 | // CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]], |
682 | // CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 1 |
683 | // CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 1 |
684 | // CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80 |
685 | // CHECK: [[MUL:%.+]] = fmul x86_fp80 [[X_RVAL]], [[EXPR]] |
686 | // CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[MUL]] to i32 |
687 | // CHECK: [[NEW_VAL:%.+]] = load i32, i32* [[TEMP1]], |
688 | // CHECK: [[BF_VALUE:%.+]] = and i32 [[CONV]], 2147483647 |
689 | // CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], -2147483648 |
690 | // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] |
691 | // CHECK: store i32 %{{.+}}, i32* [[TEMP1]] |
692 | // CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP]] to i8* |
693 | // CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* [[TEMP1]] to i8* |
694 | // CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0) |
695 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
696 | // CHECK: [[EXIT]] |
697 | // CHECK: store i32 [[A_ASHR]], i32* @{{.+}}, |
698 | #pragma omp atomic capture |
699 | {iv = bfx_packed.a; bfx_packed.a *= ldv;} |
700 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}} |
701 | // CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0) monotonic |
702 | // CHECK: br label %[[CONT:.+]] |
703 | // CHECK: [[CONT]] |
704 | // CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] |
705 | // CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP1:%.+]], |
706 | // CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]], |
707 | // CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]], |
708 | // CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_LD]], 31 |
709 | // CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80 |
710 | // CHECK: [[SUB:%.+]] = fsub x86_fp80 [[X_RVAL]], [[EXPR]] |
711 | // CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB]] to i32 |
712 | // CHECK: [[NEW_VAL:%.+]] = load i32, i32* [[TEMP1]], |
713 | // CHECK: [[BF_AND:%.+]] = and i32 [[CONV]], 1 |
714 | // CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31 |
715 | // CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], 2147483647 |
716 | // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] |
717 | // CHECK: store i32 %{{.+}}, i32* [[TEMP1]] |
718 | // CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[TEMP1]] |
719 | // CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic |
720 | // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0 |
721 | // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1 |
722 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
723 | // CHECK: [[EXIT]] |
724 | // CHECK: store i32 [[CONV]], i32* @{{.+}}, |
725 | #pragma omp atomic capture |
726 | {bfx2.a -= ldv; iv = bfx2.a;} |
727 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}} |
728 | // CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic |
729 | // CHECK: br label %[[CONT:.+]] |
730 | // CHECK: [[CONT]] |
731 | // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] |
732 | // CHECK: [[BITCAST_NEW:%.+]] = bitcast i32* %{{.+}} to i8* |
733 | // CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST_NEW]], |
734 | // CHECK: [[BITCAST:%.+]] = bitcast i32* %{{.+}} to i8* |
735 | // CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST]], |
736 | // CHECK: [[A_LD:%.+]] = load i8, i8* [[BITCAST]], |
737 | // CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_LD]], 7 |
738 | // CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR]] to i32 |
739 | // CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST]] to x86_fp80 |
740 | // CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[EXPR]], [[X_RVAL]] |
741 | // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[DIV]] to i32 |
742 | // CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8 |
743 | // CHECK: [[BF_LD:%.+]] = load i8, i8* [[BITCAST_NEW]], |
744 | // CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1 |
745 | // CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7 |
746 | // CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127 |
747 | // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]] |
748 | // CHECK: store i8 %{{.+}}, i8* [[BITCAST_NEW]] |
749 | // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[BITCAST_NEW]] |
750 | // CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic |
751 | // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0 |
752 | // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1 |
753 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
754 | // CHECK: [[EXIT]] |
755 | // CHECK: store i32 [[NEW_VAL]], i32* @{{.+}}, |
756 | #pragma omp atomic capture |
757 | iv = bfx2_packed.a = ldv / bfx2_packed.a; |
758 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}} |
759 | // CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic |
760 | // CHECK: br label %[[CONT:.+]] |
761 | // CHECK: [[CONT]] |
762 | // CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] |
763 | // CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP1:%.+]], |
764 | // CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]], |
765 | // CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]], |
766 | // CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 7 |
767 | // CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 18 |
768 | // CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80 |
769 | // CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[X_RVAL]], [[EXPR]] |
770 | // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[DIV]] to i32 |
771 | // CHECK: [[BF_LD:%.+]] = load i32, i32* [[TEMP1]], |
772 | // CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383 |
773 | // CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11 |
774 | // CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385 |
775 | // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] |
776 | // CHECK: store i32 %{{.+}}, i32* [[TEMP1]] |
777 | // CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[TEMP1]] |
778 | // CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic |
779 | // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0 |
780 | // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1 |
781 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
782 | // CHECK: [[EXIT]] |
783 | // CHECK: store i32 [[A_ASHR]], i32* @{{.+}}, |
784 | #pragma omp atomic capture |
785 | {iv = bfx3.a; bfx3.a /= ldv;} |
786 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}} |
787 | // CHECK: [[LDTEMP:%.+]] = bitcast i32* %{{.+}} to i24* |
788 | // CHECK: [[BITCAST:%.+]] = bitcast i24* [[LDTEMP]] to i8* |
789 | // CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST]], i32 0) |
790 | // CHECK: br label %[[CONT:.+]] |
791 | // CHECK: [[CONT]] |
792 | // CHECK: [[OLD:%.+]] = load i24, i24* [[LDTEMP]], |
793 | // CHECK: store i24 [[OLD]], i24* [[BITCAST2:%.+]], |
794 | // CHECK: [[OLD:%.+]] = load i24, i24* [[LDTEMP]], |
795 | // CHECK: store i24 [[OLD]], i24* [[BITCAST1:%.+]], |
796 | // CHECK: [[A_LD:%.+]] = load i24, i24* [[BITCAST1]], |
797 | // CHECK: [[A_SHL:%.+]] = shl i24 [[A_LD]], 7 |
798 | // CHECK: [[A_ASHR:%.+]] = ashr i24 [[A_SHL]], 10 |
799 | // CHECK: [[CAST:%.+]] = sext i24 [[A_ASHR]] to i32 |
800 | // CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST]] to x86_fp80 |
801 | // CHECK: [[ADD:%.+]] = fadd x86_fp80 [[X_RVAL]], [[EXPR]] |
802 | // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[ADD]] to i32 |
803 | // CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24 |
804 | // CHECK: [[BF_LD:%.+]] = load i24, i24* [[BITCAST2]], |
805 | // CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383 |
806 | // CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3 |
807 | // CHECK: [[BF_CLEAR:%.+]] = and i24 [[BF_LD]], -131065 |
808 | // CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]] |
809 | // CHECK: store i24 %{{.+}}, i24* [[BITCAST2]] |
810 | // CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i24* [[LDTEMP]] to i8* |
811 | // CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i24* [[BITCAST2]] to i8* |
812 | // CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0) |
813 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
814 | // CHECK: [[EXIT]] |
815 | // CHECK: store i32 [[NEW_VAL]], i32* @{{.+}}, |
816 | #pragma omp atomic capture |
817 | {bfx3_packed.a += ldv; iv = bfx3_packed.a;} |
818 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}} |
819 | // CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic |
820 | // CHECK: br label %[[CONT:.+]] |
821 | // CHECK: [[CONT]] |
822 | // CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] |
823 | // CHECK: store i64 [[OLD_BF_VALUE]], i64* [[TEMP1:%.+]], |
824 | // CHECK: store i64 [[OLD_BF_VALUE]], i64* [[TEMP:%.+]], |
825 | // CHECK: [[A_LD:%.+]] = load i64, i64* [[TEMP]], |
826 | // CHECK: [[A_SHL:%.+]] = shl i64 [[A_LD]], 47 |
827 | // CHECK: [[A_ASHR:%.+]] = ashr i64 [[A_SHL:%.+]], 63 |
828 | // CHECK: [[A_CAST:%.+]] = trunc i64 [[A_ASHR:%.+]] to i32 |
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_CAST]] to x86_fp80
830 | // CHECK: [[MUL:%.+]] = fmul x86_fp80 [[X_RVAL]], [[EXPR]] |
831 | // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[MUL]] to i32 |
832 | // CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64 |
833 | // CHECK: [[BF_LD:%.+]] = load i64, i64* [[TEMP1]], |
834 | // CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1 |
835 | // CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16 |
836 | // CHECK: [[BF_CLEAR:%.+]] = and i64 [[BF_LD]], -65537 |
837 | // CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]] |
838 | // CHECK: store i64 %{{.+}}, i64* [[TEMP1]] |
839 | // CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[TEMP1]] |
840 | // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic |
841 | // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0 |
842 | // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1 |
843 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
844 | // CHECK: [[EXIT]] |
845 | // CHECK: store i32 [[NEW_VAL]], i32* @{{.+}}, |
846 | #pragma omp atomic capture |
847 | iv = bfx4.a = bfx4.a * ldv; |
848 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}} |
849 | // CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic |
850 | // CHECK: br label %[[CONT:.+]] |
851 | // CHECK: [[CONT]] |
852 | // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] |
853 | // CHECK: [[BITCAST1:%.+]] = bitcast i32* %{{.+}} to i8* |
854 | // CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST1]], |
855 | // CHECK: [[BITCAST:%.+]] = bitcast i32* %{{.+}} to i8* |
856 | // CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST]], |
857 | // CHECK: [[A_LD:%.+]] = load i8, i8* [[BITCAST]], |
858 | // CHECK: [[A_SHL:%.+]] = shl i8 [[A_LD]], 7 |
859 | // CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_SHL:%.+]], 7 |
860 | // CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR:%.+]] to i32 |
861 | // CHECK: [[CONV:%.+]] = sitofp i32 [[CAST]] to x86_fp80 |
// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[CONV]], [[EXPR]]
// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB]] to i32
864 | // CHECK: [[NEW_VAL:%.+]] = trunc i32 [[CONV]] to i8 |
865 | // CHECK: [[BF_LD:%.+]] = load i8, i8* [[BITCAST1]], |
866 | // CHECK: [[BF_VALUE:%.+]] = and i8 [[NEW_VAL]], 1 |
867 | // CHECK: [[BF_CLEAR:%.+]] = and i8 [[BF_LD]], -2 |
868 | // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]] |
869 | // CHECK: store i8 %{{.+}}, i8* [[BITCAST1]] |
870 | // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[BITCAST1]] |
871 | // CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic |
872 | // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0 |
873 | // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1 |
874 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
875 | // CHECK: [[EXIT]] |
876 | // CHECK: store i32 [[CAST]], i32* @{{.+}}, |
877 | #pragma omp atomic capture |
878 | {iv = bfx4_packed.a; bfx4_packed.a -= ldv;} |
879 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}} |
880 | // CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic |
881 | // CHECK: br label %[[CONT:.+]] |
882 | // CHECK: [[CONT]] |
883 | // CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] |
884 | // CHECK: store i64 [[OLD_BF_VALUE]], i64* [[TEMP1:%.+]], |
885 | // CHECK: store i64 [[OLD_BF_VALUE]], i64* [[TEMP:%.+]], |
886 | // CHECK: [[A_LD:%.+]] = load i64, i64* [[TEMP]], |
887 | // CHECK: [[A_SHL:%.+]] = shl i64 [[A_LD]], 40 |
888 | // CHECK: [[A_ASHR:%.+]] = ashr i64 [[A_SHL:%.+]], 57 |
889 | // CHECK: [[CONV:%.+]] = sitofp i64 [[A_ASHR]] to x86_fp80 |
890 | // CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[CONV]], [[EXPR]] |
891 | // CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[DIV]] to i64 |
892 | // CHECK: [[BF_LD:%.+]] = load i64, i64* [[TEMP1]], |
893 | // CHECK: [[BF_AND:%.+]] = and i64 [[CONV]], 127 |
894 | // CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND:%.+]], 17 |
895 | // CHECK: [[BF_CLEAR:%.+]] = and i64 [[BF_LD]], -16646145 |
896 | // CHECK: [[VAL:%.+]] = or i64 [[BF_CLEAR]], [[BF_VALUE]] |
897 | // CHECK: store i64 [[VAL]], i64* [[TEMP1]] |
898 | // CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[TEMP1]] |
899 | // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic |
900 | // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0 |
901 | // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1 |
902 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
903 | // CHECK: [[EXIT]] |
904 | // CHECK: [[NEW_VAL:%.+]] = trunc i64 [[CONV]] to i32 |
905 | // CHECK: store i32 [[NEW_VAL]], i32* @{{.+}}, |
906 | #pragma omp atomic capture |
907 | {bfx4.b /= ldv; iv = bfx4.b;} |
908 | // CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}} |
909 | // CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic |
910 | // CHECK: br label %[[CONT:.+]] |
911 | // CHECK: [[CONT]] |
912 | // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] |
913 | // CHECK: [[BITCAST1:%.+]] = bitcast i64* %{{.+}} to i8* |
914 | // CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST1]], |
915 | // CHECK: [[BITCAST:%.+]] = bitcast i64* %{{.+}} to i8* |
916 | // CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST]], |
917 | // CHECK: [[A_LD:%.+]] = load i8, i8* [[BITCAST]], |
918 | // CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_LD]], 1 |
919 | // CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR]] to i64 |
920 | // CHECK: [[CONV:%.+]] = sitofp i64 [[CAST]] to x86_fp80 |
921 | // CHECK: [[ADD:%.+]] = fadd x86_fp80 [[CONV]], [[EXPR]] |
922 | // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[ADD]] to i64 |
923 | // CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8 |
924 | // CHECK: [[BF_LD:%.+]] = load i8, i8* [[BITCAST1]], |
925 | // CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127 |
926 | // CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1 |
927 | // CHECK: [[BF_CLEAR:%.+]] = and i8 [[BF_LD]], 1 |
928 | // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]] |
929 | // CHECK: store i8 %{{.+}}, i8* [[BITCAST1]] |
930 | // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[BITCAST1]] |
931 | // CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic |
932 | // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0 |
933 | // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1 |
934 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
935 | // CHECK: [[EXIT]] |
936 | // CHECK: [[NEW_VAL_I32:%.+]] = trunc i64 [[NEW_VAL]] to i32 |
937 | // CHECK: store i32 [[NEW_VAL_I32]], i32* @{{.+}}, |
938 | #pragma omp atomic capture |
939 | iv = bfx4_packed.b += ldv; |
940 | // CHECK: load i64, i64* |
941 | // CHECK: [[EXPR:%.+]] = uitofp i64 %{{.+}} to float |
942 | // CHECK: [[I64VAL:%.+]] = load atomic i64, i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic |
943 | // CHECK: br label %[[CONT:.+]] |
944 | // CHECK: [[CONT]] |
945 | // CHECK: [[OLD_I64:%.+]] = phi i64 [ [[I64VAL]], %{{.+}} ], [ [[FAILED_I64_OLD_VAL:%.+]], %[[CONT]] ] |
946 | // CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP1:%.+]] to i64* |
947 | // CHECK: store i64 [[OLD_I64]], i64* [[BITCAST]], |
948 | // CHECK: [[OLD_VEC_VAL:%.+]] = bitcast i64 [[OLD_I64]] to <2 x float> |
949 | // CHECK: store <2 x float> [[OLD_VEC_VAL]], <2 x float>* [[LDTEMP:%.+]], |
950 | // CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]] |
951 | // CHECK: [[X:%.+]] = extractelement <2 x float> [[VEC_VAL]], i64 0 |
952 | // CHECK: [[VEC_ITEM_VAL:%.+]] = fsub float [[EXPR]], [[X]] |
953 | // CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP1]], |
954 | // CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0 |
955 | // CHECK: store <2 x float> [[NEW_VEC_VAL]], <2 x float>* [[LDTEMP1]] |
956 | // CHECK: [[NEW_I64:%.+]] = load i64, i64* [[BITCAST]] |
957 | // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (<2 x float>* [[DEST]] to i64*), i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic |
958 | // CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0 |
959 | // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1 |
960 | // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] |
961 | // CHECK: [[EXIT]] |
962 | // CHECK: store float [[X]], float* @{{.+}}, |
963 | #pragma omp atomic capture |
964 | {fv = float2x.x; float2x.x = ulv - float2x.x;} |
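// The global register variable is accessed via llvm.read_register /
// llvm.write_register; when the register is only the capture target, the
// memory update itself is still a plain atomicrmw xchg.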
965 | // CHECK: [[EXPR:%.+]] = load double, double* @{{.+}}, |
966 | // CHECK: [[OLD_VAL:%.+]] = call i32 @llvm.read_register.i32([[REG:metadata ![0-9]+]]) |
967 | // CHECK: [[X_RVAL:%.+]] = sitofp i32 [[OLD_VAL]] to double |
968 | // CHECK: [[DIV:%.+]] = fdiv double [[EXPR]], [[X_RVAL]] |
969 | // CHECK: [[NEW_VAL:%.+]] = fptosi double [[DIV]] to i32 |
970 | // CHECK: call void @llvm.write_register.i32([[REG]], i32 [[NEW_VAL]]) |
971 | // CHECK: store i32 [[NEW_VAL]], i32* @{{.+}}, |
972 | // CHECK: call{{.*}} @__kmpc_flush( |
973 | #pragma omp atomic capture seq_cst |
974 | {rix = dv / rix; iv = rix;} |
975 | // CHECK: [[OLD_VAL:%.+]] = atomicrmw xchg i32* @{{.+}}, i32 5 monotonic |
976 | // CHECK: call void @llvm.write_register.i32([[REG]], i32 [[OLD_VAL]]) |
977 | #pragma omp atomic capture |
978 | {rix = ix; ix = 5;} |
979 | return 0; |
980 | } |
981 | #endif |
982 | |