| 1 | // RUN: %clang_cc1 %s -emit-llvm -o - -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 | FileCheck %s |
| 2 | // REQUIRES: x86-registered-target |
| 3 | |
| 4 | // Also test serialization of atomic operations here, to avoid duplicating the |
| 5 | // test. |
| 6 | // RUN: %clang_cc1 %s -emit-pch -o %t -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 |
| 7 | // RUN: %clang_cc1 %s -include-pch %t -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 -emit-llvm -o - | FileCheck %s |
| 8 | #ifndef ALREADY_INCLUDED |
| 9 | #define ALREADY_INCLUDED |
| 10 | |
| 11 | #include <stdatomic.h> |
| 12 | |
| 13 | // Basic IRGen tests for __c11_atomic_* and GNU __atomic_* |
| 14 | |
// Atomic integer loads in all four spellings: the C11 builtin, the GNU
// generic builtin, the GNU _n builtin, and the <stdatomic.h> generic
// function. All should lower to a single seq_cst atomic i32 load.

int fi1(_Atomic(int) *i) {
  // CHECK-LABEL: @fi1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int fi1a(int *i) {
  // CHECK-LABEL: @fi1a
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  int v;
  __atomic_load(i, &v, memory_order_seq_cst);
  return v;
}

int fi1b(int *i) {
  // CHECK-LABEL: @fi1b
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __atomic_load_n(i, memory_order_seq_cst);
}

int fi1c(atomic_int *i) {
  // CHECK-LABEL: @fi1c
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return atomic_load(i);
}
| 40 | |
// Atomic integer stores, same four spellings as the loads above; all
// should lower to a single seq_cst atomic i32 store.

void fi2(_Atomic(int) *i) {
  // CHECK-LABEL: @fi2
  // CHECK: store atomic i32 {{.*}} seq_cst
  __c11_atomic_store(i, 1, memory_order_seq_cst);
}

void fi2a(int *i) {
  // CHECK-LABEL: @fi2a
  // CHECK: store atomic i32 {{.*}} seq_cst
  int v = 1;
  __atomic_store(i, &v, memory_order_seq_cst);
}

void fi2b(int *i) {
  // CHECK-LABEL: @fi2b
  // CHECK: store atomic i32 {{.*}} seq_cst
  __atomic_store_n(i, 1, memory_order_seq_cst);
}

void fi2c(atomic_int *i) {
  // CHECK-LABEL: @fi2c
  // CHECK: store atomic i32 {{.*}} seq_cst
  atomic_store(i, 1);
}
| 65 | |
// Read-modify-write builtins. fetch_<op> forms return the OLD value, so
// only the atomicrmw instruction should appear (CHECK-NOT guards against a
// redundant follow-up op); <op>_fetch forms return the NEW value, so an
// extra non-atomic op recomputing the result is expected after the rmw.

int fi3(_Atomic(int) *i) {
  // CHECK-LABEL: @fi3
  // CHECK: atomicrmw and
  // CHECK-NOT: and
  return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
}

int fi3a(int *i) {
  // CHECK-LABEL: @fi3a
  // CHECK: atomicrmw xor
  // CHECK-NOT: xor
  return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
}

int fi3b(int *i) {
  // CHECK-LABEL: @fi3b
  // CHECK: atomicrmw add
  // CHECK: add
  return __atomic_add_fetch(i, 1, memory_order_seq_cst);
}

int fi3c(int *i) {
  // CHECK-LABEL: @fi3c
  // CHECK: atomicrmw nand
  // CHECK-NOT: and
  return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
}

int fi3d(int *i) {
  // CHECK-LABEL: @fi3d
  // nand_fetch returns the new value: nand is recomputed as and+xor.
  // CHECK: atomicrmw nand
  // CHECK: and
  // CHECK: xor
  return __atomic_nand_fetch(i, 1, memory_order_seq_cst);
}

int fi3e(atomic_int *i) {
  // CHECK-LABEL: @fi3e
  // CHECK: atomicrmw or
  // CHECK-NOT: {{ or }}
  return atomic_fetch_or(i, 1);
}

int fi3f(int *i) {
  // Narrower-than-target operand value: must be converted without an
  // intermediate volatile store.
  // CHECK-LABEL: @fi3f
  // CHECK-NOT: store volatile
  // CHECK: atomicrmw or
  // CHECK-NOT: {{ or }}
  return __atomic_fetch_or(i, (short)1, memory_order_seq_cst);
}
| 116 | |
// Compare-exchange builtins: strong vs. weak, and the write-back of the
// observed value into 'expected' on failure (the store in the
// STORE_EXPECTED block).

_Bool fi4(_Atomic(int) *i) {
  // CHECK-LABEL: @fi4(
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4a(int *i) {
  // CHECK-LABEL: @fi4a
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  int desired = 1;
  return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
}

_Bool fi4b(int *i) {
  // weak=1 must emit "cmpxchg weak".
  // CHECK-LABEL: @fi4b(
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg weak i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4c(atomic_int *i) {
  // CHECK-LABEL: @fi4c
  // CHECK: cmpxchg i32*
  int cmp = 0;
  return atomic_compare_exchange_strong(i, &cmp, 1);
}

#define _AS1 __attribute__((address_space(1)))
_Bool fi4d(_Atomic(int) *i, int _AS1 *ptr2) {
  // 'expected' lives in a non-default address space: the value must be
  // loaded from addrspace(1) before feeding the cmpxchg.
  // CHECK-LABEL: @fi4d(
  // CHECK: [[EXPECTED:%[.0-9A-Z_a-z]+]] = load i32, i32 addrspace(1)* %{{[0-9]+}}
  // CHECK: cmpxchg i32* %{{[0-9]+}}, i32 [[EXPECTED]], i32 %{{[0-9]+}} acquire acquire
  return __c11_atomic_compare_exchange_strong(i, ptr2, 1, memory_order_acquire, memory_order_acquire);
}
| 165 | |
// _Atomic(float): loads/stores are performed on the i32 bit pattern.

float ff1(_Atomic(float) *d) {
  // CHECK-LABEL: @ff1
  // CHECK: load atomic i32, i32* {{.*}} monotonic
  return __c11_atomic_load(d, memory_order_relaxed);
}

void ff2(_Atomic(float) *d) {
  // CHECK-LABEL: @ff2
  // CHECK: store atomic i32 {{.*}} release
  __c11_atomic_store(d, 1, memory_order_release);
}

// No CHECK lines: only verifies this compiles without error.
float ff3(_Atomic(float) *d) {
  return __c11_atomic_exchange(d, 2, memory_order_seq_cst);
}
| 181 | |
// An 8-byte struct: fits in an i64, so lock-free paths are possible, but
// the GNU builtins on a plain (non-_Atomic, 4-byte-aligned) struct S fall
// back to the __atomic_* library calls.
struct S {
  double x;
};

void implicit_store(_Atomic(struct S) *a, struct S s) {
  // Plain assignment through an _Atomic pointer is a seq_cst atomic store
  // of the i64-coerced value.
  // CHECK-LABEL: @implicit_store(
  // CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} seq_cst, align 8
  *a = s;
}

struct S implicit_load(_Atomic(struct S) *a) {
  // CHECK-LABEL: @implicit_load(
  // CHECK: load atomic i64, i64* %{{.*}} seq_cst, align 8
  return *a;
}

struct S fd1(struct S *a) {
  // CHECK-LABEL: @fd1
  // CHECK: [[RETVAL:%.*]] = alloca %struct.S, align 4
  // CHECK: [[A:%.*]] = bitcast %struct.S* {{.*}} to i64*
  // CHECK: [[CAST:%.*]] = bitcast %struct.S* [[RETVAL]] to i64*
  // CHECK: [[SRC:%.*]] = bitcast i64* [[A]] to i8*
  // CHECK: [[DEST:%.*]] = bitcast i64* [[CAST]] to i8*
  // CHECK: call void @__atomic_load(i32 8, i8* [[SRC]], i8* [[DEST]], i32 5)
  // CHECK: ret
  struct S ret;
  __atomic_load(a, &ret, memory_order_seq_cst);
  return ret;
}

void fd2(struct S *a, struct S *b) {
  // CHECK-LABEL: @fd2
  // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
  // CHECK-NEXT: [[CAST_B:%.*]] = bitcast i64* [[COERCED_B]] to i8*
  // CHECK-NEXT: call void @__atomic_store(i32 8, i8* [[COERCED_A]], i8* [[CAST_B]],
  // CHECK-NEXT: ret void
  __atomic_store(a, b, memory_order_seq_cst);
}
| 228 | |
// Three-pointer generic exchange on struct S: lowers to the
// @__atomic_exchange library call with i8*-coerced arguments.
void fd3(struct S *a, struct S *b, struct S *c) {
  // CHECK-LABEL: @fd3
  // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
  // CHECK-NEXT: [[CAST_B:%.*]] = bitcast i64* [[COERCED_B]] to i8*
  // CHECK-NEXT: [[CAST_C:%.*]] = bitcast i64* [[COERCED_C]] to i8*
  // CHECK-NEXT: call void @__atomic_exchange(i32 8, i8* [[COERCED_A]], i8* [[CAST_B]], i8* [[CAST_C]],

  __atomic_exchange(a, b, c, memory_order_seq_cst);
}

// Generic compare-exchange on struct S: @__atomic_compare_exchange library
// call returning the i1 success flag directly.
_Bool fd4(struct S *a, struct S *b, struct S *c) {
  // CHECK-LABEL: @fd4
  // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_B_TMP:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast i64* [[COERCED_B_TMP]] to i8*
  // CHECK-NEXT: [[CAST_C:%.*]] = bitcast i64* [[COERCED_C]] to i8*
  // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 8, i8* [[COERCED_A]], i8* [[COERCED_B]], i8* [[CAST_C]],
  // CHECK-NEXT: ret i1 [[CALL]]
  return __atomic_compare_exchange(a, b, c, 1, 5, 5);
}
| 272 | |
// Atomic pointer arithmetic. The C11 builtins scale the operand by
// sizeof(*ptr) (hence "store i32 4" for an int* with 32-bit pointers);
// the GNU builtins take a raw byte count.

int* fp1(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __c11_atomic_load(p, memory_order_seq_cst);
}

int* fp2(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp2
  // CHECK: store i32 4
  // CHECK: atomicrmw add {{.*}} monotonic
  return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
}

int *fp2a(int **p) {
  // CHECK-LABEL: @fp2a
  // CHECK: store i32 4
  // CHECK: atomicrmw sub {{.*}} monotonic
  // Note, the GNU builtins do not multiply by sizeof(T)!
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}

_Complex float fc(_Atomic(_Complex float) *c) {
  // 8-byte complex is exchanged as a single i64.
  // CHECK-LABEL: @fc
  // CHECK: atomicrmw xchg i64*
  return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
}
| 299 | |
// Small aggregates and _Bool: exchanged via an appropriately-sized
// atomicrmw xchg rather than a library call.
typedef struct X { int x; } X;
X fs(_Atomic(X) *c) {
  // CHECK-LABEL: @fs
  // CHECK: atomicrmw xchg i32*
  return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}

X fsa(X *c, X *d) {
  // CHECK-LABEL: @fsa
  // CHECK: atomicrmw xchg i32*
  X ret;
  __atomic_exchange(c, d, &ret, memory_order_seq_cst);
  return ret;
}

_Bool fsb(_Bool *c) {
  // CHECK-LABEL: @fsb
  // CHECK: atomicrmw xchg i8*
  return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}

// __atomic_test_and_set / __atomic_clear: xchg-1 and store-0 on an i8,
// with 'volatile' propagated from the pointee type.
char flag1;
volatile char flag2;
void test_and_set() {
  // CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst
  __atomic_test_and_set(&flag1, memory_order_seq_cst);
  // CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire
  __atomic_test_and_set(&flag2, memory_order_acquire);
  // CHECK: store atomic volatile i8 0, i8* @flag2 release
  __atomic_clear(&flag2, memory_order_release);
  // CHECK: store atomic i8 0, i8* @flag1 seq_cst
  __atomic_clear(&flag1, memory_order_seq_cst);
}
| 333 | |
// Lock-freedom queries. Sizes/objects chosen so some calls must go to the
// @__atomic_is_lock_free runtime and some fold to constants.
struct Sixteen {
  char c[16];
} sixteen;
struct Seventeen {
  char c[17];
} seventeen;

struct Incomplete;

int lock_free(struct Incomplete *incomplete) {
  // CHECK-LABEL: @lock_free

  // CHECK: call i32 @__atomic_is_lock_free(i32 3, i8* null)
  __c11_atomic_is_lock_free(3);

  // CHECK: call i32 @__atomic_is_lock_free(i32 16, i8* {{.*}}@sixteen{{.*}})
  __atomic_is_lock_free(16, &sixteen);

  // CHECK: call i32 @__atomic_is_lock_free(i32 17, i8* {{.*}}@seventeen{{.*}})
  __atomic_is_lock_free(17, &seventeen);

  // Incomplete pointee: alignment unknown, must ask the runtime.
  // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
  __atomic_is_lock_free(4, incomplete);

  // Unknown alignment (cs+1): must ask the runtime.
  char cs[20];
  // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
  __atomic_is_lock_free(4, cs+1);

  // __atomic_always_lock_free must always fold to a constant — no calls.
  // CHECK-NOT: call
  __atomic_always_lock_free(3, 0);
  __atomic_always_lock_free(16, 0);
  __atomic_always_lock_free(17, 0);
  __atomic_always_lock_free(16, &sixteen);
  __atomic_always_lock_free(17, &seventeen);

  int n;
  __atomic_is_lock_free(4, &n);

  // sizeof(_Atomic(int)) is lock-free on this target: folds to 1.
  // CHECK: ret i32 1
  return __c11_atomic_is_lock_free(sizeof(_Atomic(int)));
}
| 375 | |
| 376 | // Tests for atomic operations on big values. These should call the functions |
| 377 | // defined here: |
| 378 | // http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#The_Library_interface |
| 379 | |
// Oversized (512-byte) and oddly-sized (3-byte) objects: all operations
// must go through the generic @__atomic_* library calls.
struct foo {
  int big[128];
};
struct bar {
  char c[3];
};

struct bar smallThing, thing1, thing2;
struct foo bigThing;
_Atomic(struct foo) bigAtomic;

void structAtomicStore() {
  // CHECK-LABEL: @structAtomicStore
  struct foo f = {0};
  struct bar b = {0};
  __atomic_store(&smallThing, &b, 5);
  // CHECK: call void @__atomic_store(i32 3, i8* {{.*}} @smallThing

  __atomic_store(&bigThing, &f, 5);
  // CHECK: call void @__atomic_store(i32 512, i8* {{.*}} @bigThing
}
void structAtomicLoad() {
  // CHECK-LABEL: @structAtomicLoad
  struct bar b;
  __atomic_load(&smallThing, &b, 5);
  // CHECK: call void @__atomic_load(i32 3, i8* {{.*}} @smallThing

  struct foo f = {0};
  __atomic_load(&bigThing, &f, 5);
  // CHECK: call void @__atomic_load(i32 512, i8* {{.*}} @bigThing
}
struct foo structAtomicExchange() {
  // CHECK-LABEL: @structAtomicExchange
  struct foo f = {0};
  struct foo old;
  __atomic_exchange(&f, &bigThing, &old, 5);
  // CHECK: call void @__atomic_exchange(i32 512, {{.*}}, i8* bitcast ({{.*}} @bigThing to i8*),

  return __c11_atomic_exchange(&bigAtomic, f, 5);
  // CHECK: call void @__atomic_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
}
int structAtomicCmpExchange() {
  // CHECK-LABEL: @structAtomicCmpExchange
  // CHECK: %[[x_mem:.*]] = alloca i8
  _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
  // CHECK: %[[call1:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2
  // CHECK: %[[zext1:.*]] = zext i1 %[[call1]] to i8
  // CHECK: store i8 %[[zext1]], i8* %[[x_mem]], align 1
  // CHECK: %[[x:.*]] = load i8, i8* %[[x_mem]]
  // CHECK: %[[x_bool:.*]] = trunc i8 %[[x]] to i1
  // CHECK: %[[conv1:.*]] = zext i1 %[[x_bool]] to i32

  struct foo f = {0};
  struct foo g = {0};
  g.big[12] = 12;
  return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
  // CHECK: %[[call2:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
  // CHECK: %[[conv2:.*]] = zext i1 %[[call2]] to i32
  // CHECK: %[[and:.*]] = and i32 %[[conv1]], %[[conv2]]
  // CHECK: ret i32 %[[and]]
}
| 441 | |
| 442 | // Check that no atomic operations are used in any initialisation of _Atomic |
| 443 | // types. |
// Initialization of _Atomic objects (global, local, and __c11_atomic_init)
// must use plain stores, never atomic instructions.
_Atomic(int) atomic_init_i = 42;

// CHECK-LABEL: @atomic_init_foo
void atomic_init_foo()
{
  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  _Atomic(int) j = 12;

  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  __c11_atomic_init(&j, 42);

  // CHECK-NOT: atomic
  // CHECK: }
}
| 462 | |
// Constant failure orderings map directly onto the cmpxchg failure
// ordering operand.
// CHECK-LABEL: @failureOrder
void failureOrder(_Atomic(int) *ptr, int *ptr2) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 43, memory_order_acquire, memory_order_relaxed);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acquire monotonic

  __c11_atomic_compare_exchange_weak(ptr, ptr2, 43, memory_order_seq_cst, memory_order_acquire);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire

  // Unknown ordering: conservatively pick strongest valid option (for now!).
  __atomic_compare_exchange(ptr2, ptr2, ptr2, 0, memory_order_acq_rel, *ptr2);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acq_rel acquire

  // Undefined behaviour: don't really care what that last ordering is so leave
  // it out:
  __atomic_compare_exchange_n(ptr2, ptr2, 43, 1, memory_order_seq_cst, 42);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst
}
| 480 | |
// Runtime success/failure orderings: IRGen emits a nested switch over both
// ordering values and a cmpxchg for every valid (success, failure)
// combination.
// CHECK-LABEL: @generalFailureOrder
void generalFailureOrder(_Atomic(int) *ptr, int *ptr2, int success, int fail) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 42, success, fail);
  // CHECK: switch i32 {{.*}}, label %[[MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE]]
  // CHECK-NEXT: i32 3, label %[[RELEASE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 4, label %[[ACQREL:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST:[0-9a-zA-Z._]+]]

  // CHECK: [[MONOTONIC]]
  // CHECK: switch {{.*}}, label %[[MONOTONIC_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: ]

  // CHECK: [[ACQUIRE]]
  // CHECK: switch {{.*}}, label %[[ACQUIRE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[RELEASE]]
  // CHECK: switch {{.*}}, label %[[RELEASE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: ]

  // CHECK: [[ACQREL]]
  // CHECK: switch {{.*}}, label %[[ACQREL_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[SEQCST]]
  // CHECK: switch {{.*}}, label %[[SEQCST_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST_SEQCST:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[MONOTONIC_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} monotonic monotonic
  // CHECK: br

  // CHECK: [[ACQUIRE_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acquire monotonic
  // CHECK: br

  // CHECK: [[ACQUIRE_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acquire acquire
  // CHECK: br

  // CHECK: [[ACQREL_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acq_rel monotonic
  // CHECK: br

  // CHECK: [[ACQREL_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acq_rel acquire
  // CHECK: br

  // CHECK: [[SEQCST_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} seq_cst monotonic
  // CHECK: br

  // CHECK: [[SEQCST_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} seq_cst acquire
  // CHECK: br

  // CHECK: [[SEQCST_SEQCST]]
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: br
}
| 550 | |
// Runtime weak flag: IRGen branches on the i1 and emits both a strong and
// a weak cmpxchg.
void generalWeakness(int *ptr, int *ptr2, _Bool weak) {
  __atomic_compare_exchange_n(ptr, ptr2, 42, weak, memory_order_seq_cst, memory_order_seq_cst);
  // CHECK: switch i1 {{.*}}, label %[[WEAK:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i1 false, label %[[STRONG:[0-9a-zA-Z._]+]]

  // CHECK: [[STRONG]]
  // CHECK-NOT: br
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: br

  // CHECK: [[WEAK]]
  // CHECK-NOT: br
  // CHECK: cmpxchg weak {{.*}} seq_cst seq_cst
  // CHECK: br
}
| 566 | |
// Having checked the flow in the previous two cases, we'll trust clang to
// combine them sanely: runtime weak flag AND runtime orderings together
// should produce every valid strong/weak x ordering combination.
void EMIT_ALL_THE_THINGS(int *ptr, int *ptr2, int new, _Bool weak, int success, int fail) {
  __atomic_compare_exchange(ptr, ptr2, &new, weak, success, fail);

  // CHECK: = cmpxchg {{.*}} monotonic monotonic
  // CHECK: = cmpxchg weak {{.*}} monotonic monotonic
  // CHECK: = cmpxchg {{.*}} acquire monotonic
  // CHECK: = cmpxchg {{.*}} acquire acquire
  // CHECK: = cmpxchg weak {{.*}} acquire monotonic
  // CHECK: = cmpxchg weak {{.*}} acquire acquire
  // CHECK: = cmpxchg {{.*}} release monotonic
  // CHECK: = cmpxchg weak {{.*}} release monotonic
  // CHECK: = cmpxchg {{.*}} acq_rel monotonic
  // CHECK: = cmpxchg {{.*}} acq_rel acquire
  // CHECK: = cmpxchg weak {{.*}} acq_rel monotonic
  // CHECK: = cmpxchg weak {{.*}} acq_rel acquire
  // CHECK: = cmpxchg {{.*}} seq_cst monotonic
  // CHECK: = cmpxchg {{.*}} seq_cst acquire
  // CHECK: = cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: = cmpxchg weak {{.*}} seq_cst monotonic
  // CHECK: = cmpxchg weak {{.*}} seq_cst acquire
  // CHECK: = cmpxchg weak {{.*}} seq_cst seq_cst
}
| 591 | |
// PR21643: atomicrmw on a pointer in a non-default address space (257 maps
// to a segment register on x86) must keep the addrspace in the IR.
int PR21643() {
  return __atomic_or_fetch((int __attribute__((address_space(257))) *)0x308, 1,
                           __ATOMIC_RELAXED);
  // CHECK: %[[atomictmp:.*]] = alloca i32, align 4
  // CHECK: %[[atomicdst:.*]] = alloca i32, align 4
  // CHECK: store i32 1, i32* %[[atomictmp]]
  // CHECK: %[[one:.*]] = load i32, i32* %[[atomictmp]], align 4
  // CHECK: %[[old:.*]] = atomicrmw or i32 addrspace(257)* inttoptr (i32 776 to i32 addrspace(257)*), i32 %[[one]] monotonic
  // CHECK: %[[new:.*]] = or i32 %[[old]], %[[one]]
  // CHECK: store i32 %[[new]], i32* %[[atomicdst]], align 4
  // CHECK: %[[ret:.*]] = load i32, i32* %[[atomicdst]], align 4
  // CHECK: ret i32 %[[ret]]
}
| 605 | |
// PR17306: 'volatile' on the atomic pointee must be propagated to the
// emitted atomic instructions (load atomic volatile / atomicrmw volatile).
int PR17306_1(volatile _Atomic(int) *i) {
  // CHECK-LABEL: @PR17306_1
  // CHECK: %[[i_addr:.*]] = alloca i32
  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
  // CHECK-NEXT: %[[addr:.*]] = load i32*, i32** %[[i_addr]]
  // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32, i32* %[[addr]] seq_cst
  // CHECK-NEXT: store i32 %[[res]], i32* %[[atomicdst]]
  // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
  // CHECK-NEXT: ret i32 %[[retval]]
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int PR17306_2(volatile int *i, int value) {
  // CHECK-LABEL: @PR17306_2
  // CHECK: %[[i_addr:.*]] = alloca i32*
  // CHECK-NEXT: %[[value_addr:.*]] = alloca i32
  // CHECK-NEXT: %[[atomictmp:.*]] = alloca i32
  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
  // CHECK-NEXT: store i32 %value, i32* %[[value_addr]]
  // CHECK-NEXT: %[[i_lval:.*]] = load i32*, i32** %[[i_addr]]
  // CHECK-NEXT: %[[value:.*]] = load i32, i32* %[[value_addr]]
  // CHECK-NEXT: store i32 %[[value]], i32* %[[atomictmp]]
  // CHECK-NEXT: %[[value_lval:.*]] = load i32, i32* %[[atomictmp]]
  // CHECK-NEXT: %[[old_val:.*]] = atomicrmw volatile add i32* %[[i_lval]], i32 %[[value_lval]] seq_cst
  // CHECK-NEXT: %[[new_val:.*]] = add i32 %[[old_val]], %[[value_lval]]
  // CHECK-NEXT: store i32 %[[new_val]], i32* %[[atomicdst]]
  // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
  // CHECK-NEXT: ret i32 %[[retval]]
  return __atomic_add_fetch(i, value, memory_order_seq_cst);
}
| 638 | |
// Alignment-driven lowering: an 8-byte struct with only char alignment
// must use the __atomic_* library calls, while the same struct with
// __attribute__((aligned)) gets native atomic instructions.
void test_underaligned() {
  // CHECK-LABEL: @test_underaligned
  struct Underaligned { char c[8]; } underaligned_a, underaligned_b, underaligned_c;

  // CHECK: call void @__atomic_load(i32 8,
  __atomic_load(&underaligned_a, &underaligned_b, memory_order_seq_cst);
  // CHECK: call void @__atomic_store(i32 8,
  __atomic_store(&underaligned_a, &underaligned_b, memory_order_seq_cst);
  // CHECK: call void @__atomic_exchange(i32 8,
  __atomic_exchange(&underaligned_a, &underaligned_b, &underaligned_c, memory_order_seq_cst);
  // CHECK: call {{.*}} @__atomic_compare_exchange(i32 8,
  __atomic_compare_exchange(&underaligned_a, &underaligned_b, &underaligned_c, 1, memory_order_seq_cst, memory_order_seq_cst);

  __attribute__((aligned)) struct Underaligned aligned_a, aligned_b, aligned_c;

  // CHECK: load atomic
  __atomic_load(&aligned_a, &aligned_b, memory_order_seq_cst);
  // CHECK: store atomic
  __atomic_store(&aligned_a, &aligned_b, memory_order_seq_cst);
  // CHECK: atomicrmw xchg
  __atomic_exchange(&aligned_a, &aligned_b, &aligned_c, memory_order_seq_cst);
  // CHECK: cmpxchg weak
  __atomic_compare_exchange(&aligned_a, &aligned_b, &aligned_c, 1, memory_order_seq_cst, memory_order_seq_cst);
}
| 663 | |
| 664 | #endif |
| 665 | |