// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOCOMPAT
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-darwin10 -fclang-abi-compat=6.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6COMPAT
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-scei-ps4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6COMPAT
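// The first run checks the current Clang ABI (CHECK-NOCOMPAT); the second pins
// the Clang 6.0-compatible ABI with -fclang-abi-compat=6.0, and the PS4 target
// in the third run is checked against the same 6.0-compatible expectations
// (CHECK-V6COMPAT).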

extern int int_source();
extern void int_sink(int x);

namespace test0 {
  struct A {
    int aField;
    int bField;
  };

  struct B {
    int onebit : 2;
    int twobit : 6;
    int intField;
  };

  struct __attribute__((packed, aligned(2))) C : A, B {
  };
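
  // Layout note: A's two int fields occupy bytes 0-7, so the B base subobject
  // of C starts at offset 8, and B's 2-bit and 6-bit bitfields share the byte
  // at that offset. The packed/aligned(2) attributes on C are what the
  // V6COMPAT/NOCOMPAT checks below disagree about: the 6.0-compatible ABI
  // gives C alignment 2, the current ABI gives it alignment 4.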

  // These accesses should have alignment 4 because they're at offset 0
  // in a reference with an assumed alignment of 4.
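  // The 2-bit field lives in the low bits of the first byte of B, so a store
  // is a single-i8 read-modify-write (mask the new value with 3, clear the old
  // low bits with -4, then or the pieces together), and a load sign-extends
  // the 2-bit value with a shl 6 / ashr 6 pair before widening to i32.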
  // CHECK-LABEL: @_ZN5test01aERNS_1BE
  void a(B &b) {
    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 4
    b.onebit = int_source();

    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(b.onebit);
  }

  // These accesses are at offset 8 in a reference/pointer to C. Under the
  // 6.0-compatible ABI, C's assumed alignment is 2, so they get alignment 2;
  // under the current ABI the assumed alignment is 4, and since 8 is a
  // multiple of 4 they get alignment 4.
  // CHECK-LABEL: @_ZN5test01bERNS_1CE
  void b(C &c) {
    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK-V6COMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 4
    c.onebit = int_source();

    // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c.onebit);
  }

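  // Same check as b(), but through a C* rather than a C&; the assumed
  // alignment (and therefore the V6COMPAT/NOCOMPAT split) is the same.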
  // CHECK-LABEL: @_ZN5test01cEPNS_1CE
  void c(C *c) {
    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK-V6COMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 4
    c->onebit = int_source();

    // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c->onebit);
  }

  // These accesses are at offset 8 in a local variable of type C, which has
  // alignment 2 under the 6.0-compatible ABI and alignment 4 under the
  // current ABI, so they get alignment 2 or 4 respectively.
  // CHECK-LABEL: @_ZN5test01dEv
  void d() {
    // CHECK-V6COMPAT: [[C_P:%.*]] = alloca [[C:%.*]], align 2
    // CHECK-NOCOMPAT: [[C_P:%.*]] = alloca [[C:%.*]], align 4
    C c;

    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK-V6COMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 4
    c.onebit = int_source();

    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c.onebit);
  }

  // These accesses should have alignment 8 because they're at offset 8
  // in an alignment-16 variable.
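  // (The largest power of two dividing offset 8 is 8, so even a 16-byte-aligned
  // object only guarantees 8-byte alignment at that offset.)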
  // CHECK-LABEL: @_ZN5test01eEv
  void e() {
    // CHECK: [[C_P:%.*]] = alloca [[C:%.*]], align 16
    __attribute__((aligned(16))) C c;

    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 8
    c.onebit = int_source();

    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c.onebit);
  }
}

namespace test1 {
  struct Array {
    int elts[4];
  };

  struct A {
    __attribute__((aligned(16))) Array aArray;
  };

  struct B : virtual A {
    void *bPointer; // puts bArray at offset 16
    Array bArray;
  };

  struct C : virtual A { // must be viable as primary base
    // Non-empty, nv-size not a multiple of 16.
    void *cPointer1;
    void *cPointer2;
  };

  // Proof of concept that the non-virtual components of B do not have
  // to be 16-byte-aligned.
  struct D : C, B {};
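  // (C's non-virtual part is 24 bytes -- vptr plus two pointers -- so the B
  // base subobject of D is placed at offset 24 rather than at a multiple
  // of 16.)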

  // For the following tests, we want to assign into a variable whose
  // alignment is high enough that it will absolutely not be the
  // constraint on the memcpy alignment.
  typedef __attribute__((aligned(64))) Array AlignedArray;

  // CHECK-LABEL: @_ZN5test11aERNS_1AE
  void a(A &a) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY:%.*]], align 64
    // CHECK: [[A_P:%.*]] = load [[A:%.*]]*, [[A]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    AlignedArray result = a.aArray;
  }

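  // b() copies aArray out of the virtual A base, so the IR first loads the
  // virtual-base offset from the vtable (the i64 at offset -24 from the vptr)
  // to reach A; the memcpy source is 16-byte aligned thanks to aArray's
  // alignment attribute.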
  // CHECK-LABEL: @_ZN5test11bERNS_1BE
  void b(B &b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
    // CHECK: [[VPTR_P:%.*]] = bitcast [[B]]* [[B_P]] to i8**
    // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 8
    // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
    // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
    // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
    // CHECK: [[T0:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
    // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    AlignedArray result = b.aArray;
  }

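  // Through a B& or a B*, only B's non-virtual alignment (8) can be assumed,
  // since the B might be a base subobject of D placed at a non-16-byte-aligned
  // offset, so the copy of bArray uses align 8; c() and d() both check this.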
  // CHECK-LABEL: @_ZN5test11cERNS_1BE
  void c(B &b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
    AlignedArray result = b.bArray;
  }

  // CHECK-LABEL: @_ZN5test11dEPNS_1BE
  void d(B *b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
    AlignedArray result = b->bArray;
  }

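  // A local B is a complete object with alignment 16, so bArray at offset 16
  // is known to be 16-byte aligned.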
  // CHECK-LABEL: @_ZN5test11eEv
  void e() {
    // CHECK: [[B_P:%.*]] = alloca [[B]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    B b;
    AlignedArray result = b.bArray;
  }

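  // f() copies aArray out of a local D, reaching A through the same
  // virtual-base path as b() even though the dynamic type is known here.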
  // CHECK-LABEL: @_ZN5test11fEv
  void f() {
    // TODO: we should devirtualize this derived-to-base conversion.
    // CHECK: [[D_P:%.*]] = alloca [[D:%.*]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[VPTR_P:%.*]] = bitcast [[D]]* [[D_P]] to i8**
    // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 16
    // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
    // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
    // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
    // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
    // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    D d;
    AlignedArray result = d.aArray;
  }

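  // d.bArray is at offset 24 (B base) + 16 = 40 within D, so even in a
  // 16-byte-aligned local D the copy source is only 8-byte aligned.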
  // CHECK-LABEL: @_ZN5test11gEv
  void g() {
    // CHECK: [[D_P:%.*]] = alloca [[D]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 24
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
    D d;
    AlignedArray result = d.bArray;
  }
}