// RUN: %clang_cc1 %s -emit-llvm -o - -triple=armv7-apple-ios -std=c11 | FileCheck %s

// There isn't really anything special about iOS; it just happens to
// only deploy on processors with native atomics support, so it's a good
// way to test those code-paths.

// This work was done in pursuit of <rdar://13338582>.

// CHECK-LABEL: define void @testFloat(float*
void testFloat(_Atomic(float) *fp) {
// CHECK:      [[FP:%.*]] = alloca float*
// CHECK-NEXT: [[X:%.*]] = alloca float
// CHECK-NEXT: [[F:%.*]] = alloca float
// CHECK-NEXT: store float* {{%.*}}, float** [[FP]]

// CHECK-NEXT: [[T0:%.*]] = load float*, float** [[FP]]
// CHECK-NEXT: store float 1.000000e+00, float* [[T0]], align 4
  __c11_atomic_init(fp, 1.0f);

// CHECK-NEXT: store float 2.000000e+00, float* [[X]], align 4
  _Atomic(float) x = 2.0f;

// CHECK-NEXT: [[T0:%.*]] = load float*, float** [[FP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast float* [[T0]] to i32*
// CHECK-NEXT: [[T2:%.*]] = load atomic i32, i32* [[T1]] seq_cst, align 4
// CHECK-NEXT: [[T3:%.*]] = bitcast i32 [[T2]] to float
// CHECK-NEXT: store float [[T3]], float* [[F]]
  float f = *fp;

// CHECK-NEXT: [[T0:%.*]] = load float, float* [[F]], align 4
// CHECK-NEXT: [[T1:%.*]] = load float*, float** [[FP]], align 4
// CHECK-NEXT: [[T2:%.*]] = bitcast float [[T0]] to i32
// CHECK-NEXT: [[T3:%.*]] = bitcast float* [[T1]] to i32*
// CHECK-NEXT: store atomic i32 [[T2]], i32* [[T3]] seq_cst, align 4
  *fp = f;

// CHECK-NEXT: ret void
}
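
// Illustrative addition, not part of the FileCheck-verified output: plain
// reads and writes of an _Atomic(float) lower to exactly the seq_cst i32
// load/store pattern checked above. The helper name is hypothetical; it is
// static and unreferenced, so Clang emits no IR for it and the CHECK lines
// are unaffected. Note that the read-modify-write below is two separate
// atomic operations, not one atomic RMW.
static float demoFloatIncrement(_Atomic(float) *fp) {
  float old = *fp;   // seq_cst atomic i32 load, bitcast to float
  *fp = old + 1.0f;  // bitcast to i32, seq_cst atomic store
  return old;
}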

// CHECK: define void @testComplexFloat([[CF:{ float, float }]]*
void testComplexFloat(_Atomic(_Complex float) *fp) {
// CHECK:      [[FP:%.*]] = alloca [[CF]]*, align 4
// CHECK-NEXT: [[X:%.*]] = alloca [[CF]], align 8
// CHECK-NEXT: [[F:%.*]] = alloca [[CF]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = alloca [[CF]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = alloca [[CF]], align 8
// CHECK-NEXT: store [[CF]]*

// CHECK-NEXT: [[P:%.*]] = load [[CF]]*, [[CF]]** [[FP]]
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[P]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[P]], i32 0, i32 1
// CHECK-NEXT: store float 1.000000e+00, float* [[T0]]
// CHECK-NEXT: store float 0.000000e+00, float* [[T1]]
  __c11_atomic_init(fp, 1.0f);

// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[X]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[X]], i32 0, i32 1
// CHECK-NEXT: store float 2.000000e+00, float* [[T0]]
// CHECK-NEXT: store float 0.000000e+00, float* [[T1]]
  _Atomic(_Complex float) x = 2.0f;

// CHECK-NEXT: [[T0:%.*]] = load [[CF]]*, [[CF]]** [[FP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast [[CF]]* [[T0]] to i64*
// CHECK-NEXT: [[T2:%.*]] = load atomic i64, i64* [[T1]] seq_cst, align 8
// CHECK-NEXT: [[T3:%.*]] = bitcast [[CF]]* [[TMP0]] to i64*
// CHECK-NEXT: store i64 [[T2]], i64* [[T3]], align 8
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[R:%.*]] = load float, float* [[T0]]
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP0]], i32 0, i32 1
// CHECK-NEXT: [[I:%.*]] = load float, float* [[T0]]
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 1
// CHECK-NEXT: store float [[R]], float* [[T0]]
// CHECK-NEXT: store float [[I]], float* [[T1]]
  _Complex float f = *fp;

// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 0
// CHECK-NEXT: [[R:%.*]] = load float, float* [[T0]]
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 1
// CHECK-NEXT: [[I:%.*]] = load float, float* [[T0]]
// CHECK-NEXT: [[DEST:%.*]] = load [[CF]]*, [[CF]]** [[FP]], align 4
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP1]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP1]], i32 0, i32 1
// CHECK-NEXT: store float [[R]], float* [[T0]]
// CHECK-NEXT: store float [[I]], float* [[T1]]
// CHECK-NEXT: [[T0:%.*]] = bitcast [[CF]]* [[TMP1]] to i64*
// CHECK-NEXT: [[T1:%.*]] = load i64, i64* [[T0]], align 8
// CHECK-NEXT: [[T2:%.*]] = bitcast [[CF]]* [[DEST]] to i64*
// CHECK-NEXT: store atomic i64 [[T1]], i64* [[T2]] seq_cst, align 8
  *fp = f;

// CHECK-NEXT: ret void
}
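
// Illustrative addition (hypothetical helper; static and unreferenced, so no
// IR is emitted): an 8-byte _Complex float fits ARM's native 64-bit atomics,
// so a whole-value copy lowers to the single atomic i64 load and store
// checked above rather than a library call.
static void demoCopyComplex(_Atomic(_Complex float) *dst,
                            _Atomic(_Complex float) *src) {
  *dst = *src; // one seq_cst atomic i64 load, then one seq_cst atomic i64 store
}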

typedef struct { short x, y, z, w; } S;
// CHECK: define void @testStruct([[S:.*]]*
void testStruct(_Atomic(S) *fp) {
// CHECK:      [[FP:%.*]] = alloca [[S]]*, align 4
// CHECK-NEXT: [[X:%.*]] = alloca [[S]], align 8
// CHECK-NEXT: [[F:%.*]] = alloca [[S:%.*]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = alloca [[S]], align 8
// CHECK-NEXT: store [[S]]*

// CHECK-NEXT: [[P:%.*]] = load [[S]]*, [[S]]** [[FP]]
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 0
// CHECK-NEXT: store i16 1, i16* [[T0]], align 8
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T0]], align 2
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 2
// CHECK-NEXT: store i16 3, i16* [[T0]], align 4
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 3
// CHECK-NEXT: store i16 4, i16* [[T0]], align 2
  __c11_atomic_init(fp, (S){1,2,3,4});

// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 0
// CHECK-NEXT: store i16 1, i16* [[T0]], align 8
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T0]], align 2
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 2
// CHECK-NEXT: store i16 3, i16* [[T0]], align 4
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 3
// CHECK-NEXT: store i16 4, i16* [[T0]], align 2
  _Atomic(S) x = (S){1,2,3,4};

// CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[T0]] to i64*
// CHECK-NEXT: [[T2:%.*]] = load atomic i64, i64* [[T1]] seq_cst, align 8
// CHECK-NEXT: [[T3:%.*]] = bitcast [[S]]* [[F]] to i64*
// CHECK-NEXT: store i64 [[T2]], i64* [[T3]], align 2
  S f = *fp;

// CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[TMP0]] to i8*
// CHECK-NEXT: [[T2:%.*]] = bitcast [[S]]* [[F]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[T1]], i8* align 2 [[T2]], i32 8, i1 false)
// CHECK-NEXT: [[T3:%.*]] = bitcast [[S]]* [[TMP0]] to i64*
// CHECK-NEXT: [[T4:%.*]] = load i64, i64* [[T3]], align 8
// CHECK-NEXT: [[T5:%.*]] = bitcast [[S]]* [[T0]] to i64*
// CHECK-NEXT: store atomic i64 [[T4]], i64* [[T5]] seq_cst, align 8
  *fp = f;

// CHECK-NEXT: ret void
}
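
// Illustrative addition (hypothetical helper; static and unreferenced, so no
// IR is emitted): S is exactly 8 bytes with no tail padding, so each
// whole-struct access above is a single native i64 atomic. A field-wise
// update still has to go through a full-struct load and store.
static void demoSetY(_Atomic(S) *sp, short y) {
  S tmp = *sp; // seq_cst atomic i64 load
  tmp.y = y;   // non-atomic update of the local copy
  *sp = tmp;   // seq_cst atomic i64 store; the sequence as a whole is not atomic
}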

typedef struct { short x, y, z; } PS;
// CHECK: define void @testPromotedStruct([[APS:.*]]*
void testPromotedStruct(_Atomic(PS) *fp) {
// CHECK:      [[FP:%.*]] = alloca [[APS]]*, align 4
// CHECK-NEXT: [[X:%.*]] = alloca [[APS]], align 8
// CHECK-NEXT: [[F:%.*]] = alloca [[PS:%.*]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = alloca [[APS]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = alloca [[APS]], align 8
// CHECK-NEXT: store [[APS]]*

// CHECK-NEXT: [[P:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
// CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[P]] to i8*
// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[T0]], i8 0, i64 8, i1 false)
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
// CHECK-NEXT: store i16 1, i16* [[T1]], align 8
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T1]], align 2
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 2
// CHECK-NEXT: store i16 3, i16* [[T1]], align 4
  __c11_atomic_init(fp, (PS){1,2,3});

// CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[X]] to i8*
// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 8 [[T0]], i8 0, i32 8, i1 false)
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[X]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
// CHECK-NEXT: store i16 1, i16* [[T1]], align 8
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T1]], align 2
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 2
// CHECK-NEXT: store i16 3, i16* [[T1]], align 4
  _Atomic(PS) x = (PS){1,2,3};

// CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast [[APS]]* [[T0]] to i64*
// CHECK-NEXT: [[T2:%.*]] = load atomic i64, i64* [[T1]] seq_cst, align 8
// CHECK-NEXT: [[T3:%.*]] = bitcast [[APS]]* [[TMP0]] to i64*
// CHECK-NEXT: store i64 [[T2]], i64* [[T3]], align 8
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = bitcast [[PS]]* [[F]] to i8*
// CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T0]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 8 [[T2]], i32 6, i1 false)
  PS f = *fp;

// CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[TMP1]] to i8*
// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 8 [[T1]], i8 0, i32 8, i1 false)
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP1]], i32 0, i32 0
// CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T1]] to i8*
// CHECK-NEXT: [[T3:%.*]] = bitcast [[PS]]* [[F]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[T2]], i8* align 2 [[T3]], i32 6, i1 false)
// CHECK-NEXT: [[T4:%.*]] = bitcast [[APS]]* [[TMP1]] to i64*
// CHECK-NEXT: [[T5:%.*]] = load i64, i64* [[T4]], align 8
// CHECK-NEXT: [[T6:%.*]] = bitcast [[APS]]* [[T0]] to i64*
// CHECK-NEXT: store atomic i64 [[T5]], i64* [[T6]] seq_cst, align 8
  *fp = f;

// CHECK-NEXT: ret void
}
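
// Illustrative addition (hypothetical helper; static and unreferenced, so no
// IR is emitted): PS is only 6 bytes, so its atomic representation is
// promoted to the padded 8-byte type { %struct.PS, [2 x i8] } ([[APS]] in the
// checks above). Stores zero the whole temporary first (the memset) so the
// padding bytes have a defined value before the full i64 is written.
static void demoInitPS(_Atomic(PS) *p) {
  __c11_atomic_init(p, (PS){1, 2, 3}); // zero the padding, then store the fields
}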

PS test_promoted_load(_Atomic(PS) *addr) {
// CHECK-LABEL: @test_promoted_load(%struct.PS* noalias sret %agg.result, { %struct.PS, [2 x i8] }* %addr)
// CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4
// CHECK: [[ATOMIC_RES:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: store { %struct.PS, [2 x i8] }* %addr, { %struct.PS, [2 x i8] }** [[ADDR_ARG]], align 4
// CHECK: [[ADDR:%.*]] = load { %struct.PS, [2 x i8] }*, { %struct.PS, [2 x i8] }** [[ADDR_ARG]], align 4
// CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
// CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64*
// CHECK: [[VAL:%.*]] = load atomic i64, i64* [[ADDR64]] seq_cst, align 8
// CHECK: store i64 [[VAL]], i64* [[ATOMIC_RES64]], align 8
// CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS*
// CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8*
// CHECK: [[ATOMIC_RES8:%.*]] = bitcast %struct.PS* [[ATOMIC_RES_STRUCT]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 8 [[ATOMIC_RES8]], i32 6, i1 false)

  return __c11_atomic_load(addr, 5); // 5 == __ATOMIC_SEQ_CST
}
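
// The literal memory-order argument 5 used by the tests above and below is
// the value of Clang's predefined __ATOMIC_SEQ_CST macro. A sketch of the
// equivalent call spelled with the named constant (hypothetical helper;
// static and unreferenced, so no IR is emitted):
static PS demoLoadPS(_Atomic(PS) *addr) {
  return __c11_atomic_load(addr, __ATOMIC_SEQ_CST); // identical to passing 5
}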

void test_promoted_store(_Atomic(PS) *addr, PS *val) {
// CHECK-LABEL: @test_promoted_store({ %struct.PS, [2 x i8] }* %addr, %struct.PS* %val)
// CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4
// CHECK: [[VAL_ARG:%.*]] = alloca %struct.PS*, align 4
// CHECK: [[NONATOMIC_TMP:%.*]] = alloca %struct.PS, align 2
// CHECK: [[ATOMIC_VAL:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: store { %struct.PS, [2 x i8] }* %addr, { %struct.PS, [2 x i8] }** [[ADDR_ARG]], align 4
// CHECK: store %struct.PS* %val, %struct.PS** [[VAL_ARG]], align 4
// CHECK: [[ADDR:%.*]] = load { %struct.PS, [2 x i8] }*, { %struct.PS, [2 x i8] }** [[ADDR_ARG]], align 4
// CHECK: [[VAL:%.*]] = load %struct.PS*, %struct.PS** [[VAL_ARG]], align 4
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: [[VAL8:%.*]] = bitcast %struct.PS* [[VAL]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[NONATOMIC_TMP8]], i8* align 2 [[VAL8]], i32 6, i1 false)
// CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
// CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
// CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
// CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8
// CHECK: store atomic i64 [[VAL64]], i64* [[ADDR64]] seq_cst, align 8

  __c11_atomic_store(addr, *val, 5); // 5 == __ATOMIC_SEQ_CST
}

PS test_promoted_exchange(_Atomic(PS) *addr, PS *val) {
// CHECK-LABEL: @test_promoted_exchange(%struct.PS* noalias sret %agg.result, { %struct.PS, [2 x i8] }* %addr, %struct.PS* %val)
// CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4
// CHECK: [[VAL_ARG:%.*]] = alloca %struct.PS*, align 4
// CHECK: [[NONATOMIC_TMP:%.*]] = alloca %struct.PS, align 2
// CHECK: [[ATOMIC_VAL:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: [[ATOMIC_RES:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: store { %struct.PS, [2 x i8] }* %addr, { %struct.PS, [2 x i8] }** [[ADDR_ARG]], align 4
// CHECK: store %struct.PS* %val, %struct.PS** [[VAL_ARG]], align 4
// CHECK: [[ADDR:%.*]] = load { %struct.PS, [2 x i8] }*, { %struct.PS, [2 x i8] }** [[ADDR_ARG]], align 4
// CHECK: [[VAL:%.*]] = load %struct.PS*, %struct.PS** [[VAL_ARG]], align 4
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: [[VAL8:%.*]] = bitcast %struct.PS* [[VAL]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[NONATOMIC_TMP8]], i8* align 2 [[VAL8]], i32 6, i1 false)
// CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
// CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
// CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
// CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64*
// CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8
// CHECK: [[RES:%.*]] = atomicrmw xchg i64* [[ADDR64]], i64 [[VAL64]] seq_cst
// CHECK: store i64 [[RES]], i64* [[ATOMIC_RES64]], align 8
// CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS*
// CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8*
// CHECK: [[ATOMIC_RES8:%.*]] = bitcast %struct.PS* [[ATOMIC_RES_STRUCT]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 8 [[ATOMIC_RES8]], i32 6, i1 false)
  return __c11_atomic_exchange(addr, *val, 5); // 5 == __ATOMIC_SEQ_CST
}
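
// Illustrative addition (hypothetical helper; static and unreferenced, so no
// IR is emitted): because PS is promoted to an 8-byte representation, a value
// swap becomes a single atomicrmw xchg on i64, as checked above.
static PS demoReplacePS(_Atomic(PS) *addr, PS v) {
  return __c11_atomic_exchange(addr, v, __ATOMIC_SEQ_CST); // returns the prior value
}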

_Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
// CHECK: define zeroext i1 @test_promoted_cmpxchg({ %struct.PS, [2 x i8] }* %addr, %struct.PS* %desired, %struct.PS* %new) #0 {
// CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4
// CHECK: [[DESIRED_ARG:%.*]] = alloca %struct.PS*, align 4
// CHECK: [[NEW_ARG:%.*]] = alloca %struct.PS*, align 4
// CHECK: [[NONATOMIC_TMP:%.*]] = alloca %struct.PS, align 2
// CHECK: [[ATOMIC_DESIRED:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: [[ATOMIC_NEW:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: [[RES_ADDR:%.*]] = alloca i8, align 1
// CHECK: store { %struct.PS, [2 x i8] }* %addr, { %struct.PS, [2 x i8] }** [[ADDR_ARG]], align 4
// CHECK: store %struct.PS* %desired, %struct.PS** [[DESIRED_ARG]], align 4
// CHECK: store %struct.PS* %new, %struct.PS** [[NEW_ARG]], align 4
// CHECK: [[ADDR:%.*]] = load { %struct.PS, [2 x i8] }*, { %struct.PS, [2 x i8] }** [[ADDR_ARG]], align 4
// CHECK: [[DESIRED:%.*]] = load %struct.PS*, %struct.PS** [[DESIRED_ARG]], align 4
// CHECK: [[NEW:%.*]] = load %struct.PS*, %struct.PS** [[NEW_ARG]], align 4
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: [[NEW8:%.*]] = bitcast %struct.PS* [[NEW]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[NONATOMIC_TMP8]], i8* align 2 [[NEW8]], i32 6, i1 false)
// CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
// CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i8*
// CHECK: [[DESIRED8:%.*]] = bitcast %struct.PS* [[DESIRED]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false)
// CHECK: [[ATOMIC_DESIRED64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i64*
// CHECK: [[ATOMIC_NEW8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i8*
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
// CHECK: [[ATOMIC_NEW64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i64*
// CHECK: [[ATOMIC_DESIRED_VAL64:%.*]] = load i64, i64* [[ATOMIC_DESIRED64]], align 8
// CHECK: [[ATOMIC_NEW_VAL64:%.*]] = load i64, i64* [[ATOMIC_NEW64]], align 8
// CHECK: [[RES:%.*]] = cmpxchg i64* [[ADDR64]], i64 [[ATOMIC_DESIRED_VAL64]], i64 [[ATOMIC_NEW_VAL64]] seq_cst seq_cst
// CHECK: [[RES_VAL64:%.*]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[RES_BOOL:%.*]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[RES_BOOL]], label {{%.*}}, label {{%.*}}

// CHECK: store i64 [[RES_VAL64]], i64* [[ATOMIC_DESIRED64]], align 8
// CHECK: br label {{%.*}}

// CHECK: [[RES_BOOL8:%.*]] = zext i1 [[RES_BOOL]] to i8
// CHECK: store i8 [[RES_BOOL8]], i8* [[RES_ADDR]], align 1
// CHECK: [[RES_BOOL8:%.*]] = load i8, i8* [[RES_ADDR]], align 1
// CHECK: [[RETVAL:%.*]] = trunc i8 [[RES_BOOL8]] to i1
// CHECK: ret i1 [[RETVAL]]

  return __c11_atomic_compare_exchange_strong(addr, desired, *new, 5, 5); // success/failure orders: 5 == __ATOMIC_SEQ_CST
}
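
// Illustrative addition (hypothetical helper; static and unreferenced, so no
// IR is emitted): the typical retry loop around the strong compare-exchange
// tested above. On failure the builtin rewrites *expected with the value it
// found, which is why the checked IR stores the cmpxchg result back into
// [[ATOMIC_DESIRED64]]. The comparison is bitwise over the promoted 8 bytes;
// it works because every store zeroes the padding first.
static void demoIncrementX(_Atomic(PS) *addr) {
  PS expected = __c11_atomic_load(addr, __ATOMIC_SEQ_CST);
  PS desired;
  do {
    desired = expected;
    desired.x++; // the update we want to publish atomically
  } while (!__c11_atomic_compare_exchange_strong(addr, &expected, desired,
                                                 __ATOMIC_SEQ_CST,
                                                 __ATOMIC_SEQ_CST));
}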

struct Empty {};

struct Empty testEmptyStructLoad(_Atomic(struct Empty)* empty) {
// CHECK-LABEL: @testEmptyStructLoad(
// CHECK-NOT: @__atomic_load
// CHECK: load atomic i8, i8* %{{.*}} seq_cst, align 1
  return *empty;
}

void testEmptyStructStore(_Atomic(struct Empty)* empty, struct Empty value) {
// CHECK-LABEL: @testEmptyStructStore(
// CHECK-NOT: @__atomic_store
// CHECK: store atomic i8 %{{.*}}, i8* %{{.*}} seq_cst, align 1
  *empty = value;
}
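
// Illustrative addition (hypothetical helper; static and unreferenced, so no
// IR is emitted): even a zero-size struct round-trips through 1-byte atomic
// accesses rather than __atomic_* library calls, as the checks above require.
static void demoEmptyRoundTrip(_Atomic(struct Empty) *p) {
  struct Empty e = *p; // atomic i8 load
  *p = e;              // atomic i8 store
}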