1 | // RUN: %clang_cc1 -triple riscv64 -emit-llvm %s -o - | FileCheck %s |
2 | |
3 | #include <stddef.h> |
4 | #include <stdint.h> |
5 | |
6 | // CHECK-LABEL: define void @f_void() |
7 | void f_void(void) {} |
8 | |
// Scalar arguments and return values smaller than the word size are extended
// according to the sign of their type up to 32 bits, then sign-extended to
// XLEN bits.
11 | |
12 | // CHECK-LABEL: define zeroext i1 @f_scalar_0(i1 zeroext %x) |
13 | _Bool f_scalar_0(_Bool x) { return x; } |
14 | |
15 | // CHECK-LABEL: define signext i8 @f_scalar_1(i8 signext %x) |
16 | int8_t f_scalar_1(int8_t x) { return x; } |
17 | |
18 | // CHECK-LABEL: define zeroext i8 @f_scalar_2(i8 zeroext %x) |
19 | uint8_t f_scalar_2(uint8_t x) { return x; } |
20 | |
21 | // CHECK-LABEL: define signext i32 @f_scalar_3(i32 signext %x) |
22 | uint32_t f_scalar_3(int32_t x) { return x; } |
23 | |
24 | // CHECK-LABEL: define i64 @f_scalar_4(i64 %x) |
25 | int64_t f_scalar_4(int64_t x) { return x; } |
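
// The next function is an illustrative addition (hypothetical name, not part
// of the original test): per the RV64 psABI, unsigned 32-bit scalars are also
// sign-extended to XLEN bits, so uint32_t is expected to get the signext
// attribute on both the argument and the return value.

// CHECK-LABEL: define signext i32 @f_scalar_5(i32 signext %x)
uint32_t f_scalar_5(uint32_t x) { return x; }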
26 | |
27 | // CHECK-LABEL: define float @f_fp_scalar_1(float %x) |
28 | float f_fp_scalar_1(float x) { return x; } |
29 | |
30 | // CHECK-LABEL: define double @f_fp_scalar_2(double %x) |
31 | double f_fp_scalar_2(double x) { return x; } |
32 | |
33 | // CHECK-LABEL: define fp128 @f_fp_scalar_3(fp128 %x) |
34 | long double f_fp_scalar_3(long double x) { return x; } |
35 | |
36 | // Empty structs or unions are ignored. |
37 | |
38 | struct empty_s {}; |
39 | |
40 | // CHECK-LABEL: define void @f_agg_empty_struct() |
41 | struct empty_s f_agg_empty_struct(struct empty_s x) { |
42 | return x; |
43 | } |
44 | |
45 | union empty_u {}; |
46 | |
47 | // CHECK-LABEL: define void @f_agg_empty_union() |
48 | union empty_u f_agg_empty_union(union empty_u x) { |
49 | return x; |
50 | } |
51 | |
52 | // Aggregates <= 2*xlen may be passed in registers, so will be coerced to |
53 | // integer arguments. The rules for return are the same. |
54 | |
55 | struct tiny { |
56 | uint16_t a, b, c, d; |
57 | }; |
58 | |
59 | // CHECK-LABEL: define void @f_agg_tiny(i64 %x.coerce) |
60 | void f_agg_tiny(struct tiny x) { |
61 | x.a += x.b; |
62 | x.c += x.d; |
63 | } |
64 | |
65 | // CHECK-LABEL: define i64 @f_agg_tiny_ret() |
66 | struct tiny f_agg_tiny_ret() { |
67 | return (struct tiny){1, 2, 3, 4}; |
68 | } |
69 | |
70 | typedef uint16_t v4i16 __attribute__((vector_size(8))); |
71 | typedef int64_t v1i64 __attribute__((vector_size(8))); |
72 | |
73 | // CHECK-LABEL: define void @f_vec_tiny_v4i16(i64 %x.coerce) |
74 | void f_vec_tiny_v4i16(v4i16 x) { |
75 | x[0] = x[1]; |
76 | x[2] = x[3]; |
77 | } |
78 | |
79 | // CHECK-LABEL: define i64 @f_vec_tiny_v4i16_ret() |
80 | v4i16 f_vec_tiny_v4i16_ret() { |
81 | return (v4i16){1, 2, 3, 4}; |
82 | } |
83 | |
84 | // CHECK-LABEL: define void @f_vec_tiny_v1i64(i64 %x.coerce) |
85 | void f_vec_tiny_v1i64(v1i64 x) { |
86 | x[0] = 114; |
87 | } |
88 | |
89 | // CHECK-LABEL: define i64 @f_vec_tiny_v1i64_ret() |
90 | v1i64 f_vec_tiny_v1i64_ret() { |
91 | return (v1i64){1}; |
92 | } |
93 | |
94 | struct small { |
95 | int64_t a, *b; |
96 | }; |
97 | |
98 | // CHECK-LABEL: define void @f_agg_small([2 x i64] %x.coerce) |
99 | void f_agg_small(struct small x) { |
100 | x.a += *x.b; |
101 | x.b = &x.a; |
102 | } |
103 | |
104 | // CHECK-LABEL: define [2 x i64] @f_agg_small_ret() |
105 | struct small f_agg_small_ret() { |
106 | return (struct small){1, 0}; |
107 | } |
108 | |
109 | typedef uint16_t v8i16 __attribute__((vector_size(16))); |
110 | typedef __int128_t v1i128 __attribute__((vector_size(16))); |
111 | |
112 | // CHECK-LABEL: define void @f_vec_small_v8i16(i128 %x.coerce) |
113 | void f_vec_small_v8i16(v8i16 x) { |
114 | x[0] = x[7]; |
115 | } |
116 | |
117 | // CHECK-LABEL: define i128 @f_vec_small_v8i16_ret() |
118 | v8i16 f_vec_small_v8i16_ret() { |
119 | return (v8i16){1, 2, 3, 4, 5, 6, 7, 8}; |
120 | } |
121 | |
122 | // CHECK-LABEL: define void @f_vec_small_v1i128(i128 %x.coerce) |
123 | void f_vec_small_v1i128(v1i128 x) { |
124 | x[0] = 114; |
125 | } |
126 | |
127 | // CHECK-LABEL: define i128 @f_vec_small_v1i128_ret() |
128 | v1i128 f_vec_small_v1i128_ret() { |
129 | return (v1i128){1}; |
130 | } |
131 | |
132 | // Aggregates of 2*xlen size and 2*xlen alignment should be coerced to a |
133 | // single 2*xlen-sized argument, to ensure that alignment can be maintained if |
134 | // passed on the stack. |
135 | |
136 | struct small_aligned { |
137 | __int128_t a; |
138 | }; |
139 | |
140 | // CHECK-LABEL: define void @f_agg_small_aligned(i128 %x.coerce) |
141 | void f_agg_small_aligned(struct small_aligned x) { |
142 | x.a += x.a; |
143 | } |
144 | |
145 | // CHECK-LABEL: define i128 @f_agg_small_aligned_ret(i128 %x.coerce) |
146 | struct small_aligned f_agg_small_aligned_ret(struct small_aligned x) { |
147 | return (struct small_aligned){10}; |
148 | } |
149 | |
// Aggregates greater than 2*xlen will be passed and returned indirectly.
151 | struct large { |
152 | int64_t a, b, c, d; |
153 | }; |
154 | |
155 | // CHECK-LABEL: define void @f_agg_large(%struct.large* %x) |
156 | void f_agg_large(struct large x) { |
157 | x.a = x.b + x.c + x.d; |
158 | } |
159 | |
// The address that the returned struct should be written to is passed as the
// first argument.
162 | // CHECK-LABEL: define void @f_agg_large_ret(%struct.large* noalias sret %agg.result, i32 signext %i, i8 signext %j) |
163 | struct large f_agg_large_ret(int32_t i, int8_t j) { |
164 | return (struct large){1, 2, 3, 4}; |
165 | } |
166 | |
167 | typedef unsigned char v32i8 __attribute__((vector_size(32))); |
168 | |
169 | // CHECK-LABEL: define void @f_vec_large_v32i8(<32 x i8>*) |
170 | void f_vec_large_v32i8(v32i8 x) { |
171 | x[0] = x[7]; |
172 | } |
173 | |
174 | // CHECK-LABEL: define void @f_vec_large_v32i8_ret(<32 x i8>* noalias sret %agg.result) |
175 | v32i8 f_vec_large_v32i8_ret() { |
176 | return (v32i8){1, 2, 3, 4, 5, 6, 7, 8}; |
177 | } |
178 | |
// Scalars passed on the stack should not have signext/zeroext attributes
// (they are anyext).
181 | |
182 | // CHECK-LABEL: define signext i32 @f_scalar_stack_1(i64 %a.coerce, [2 x i64] %b.coerce, i128 %c.coerce, %struct.large* %d, i8 zeroext %e, i8 signext %f, i8 %g, i8 %h) |
183 | int f_scalar_stack_1(struct tiny a, struct small b, struct small_aligned c, |
184 | struct large d, uint8_t e, int8_t f, uint8_t g, int8_t h) { |
185 | return g + h; |
186 | } |
187 | |
188 | // CHECK-LABEL: define signext i32 @f_scalar_stack_2(i32 signext %a, i128 %b, float %c, fp128 %d, <32 x i8>*, i8 zeroext %f, i8 %g, i8 %h) |
189 | int f_scalar_stack_2(int32_t a, __int128_t b, float c, long double d, v32i8 e, |
190 | uint8_t f, int8_t g, uint8_t h) { |
191 | return g + h; |
192 | } |
193 | |
194 | // Ensure that scalars passed on the stack are still determined correctly in |
195 | // the presence of large return values that consume a register due to the need |
196 | // to pass a pointer. |
197 | |
198 | // CHECK-LABEL: define void @f_scalar_stack_3(%struct.large* noalias sret %agg.result, i32 signext %a, i128 %b, fp128 %c, <32 x i8>*, i8 zeroext %e, i8 %f, i8 %g) |
199 | struct large f_scalar_stack_3(uint32_t a, __int128_t b, long double c, v32i8 d, |
200 | uint8_t e, int8_t f, uint8_t g) { |
201 | return (struct large){a, e, f, g}; |
202 | } |
203 | |
// Ensure that ABI lowering happens as expected for vararg calls. Specifically,
// signext should be emitted for varargs that will be passed in registers but
// not for those passed on the stack. This must take into account the use of
// "aligned" register pairs for varargs with 2*xlen alignment.
208 | |
209 | int f_va_callee(int, ...); |
210 | |
211 | // CHECK-LABEL: define void @f_va_caller() |
212 | void f_va_caller() { |
213 | // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i64 3, double 4.000000e+00, double 5.000000e+00, i64 {{%.*}}, [2 x i64] {{%.*}}, i128 {{%.*}}, %struct.large* {{%.*}}) |
214 | f_va_callee(1, 2, 3LL, 4.0f, 5.0, (struct tiny){6, 7, 8, 9}, |
215 | (struct small){10, NULL}, (struct small_aligned){11}, |
216 | (struct large){12, 13, 14, 15}); |
217 | // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, fp128 0xL00000000000000004001400000000000, i32 signext 6, i32 signext 7, i32 8, i32 9) |
218 | f_va_callee(1, 2, 3, 4, 5.0L, 6, 7, 8, 9); |
219 | // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i128 {{%.*}}, i32 signext 6, i32 signext 7, i32 8, i32 9) |
220 | f_va_callee(1, 2, 3, 4, (struct small_aligned){5}, 6, 7, 8, 9); |
221 | // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, [2 x i64] {{%.*}}, i32 signext 6, i32 signext 7, i32 8, i32 9) |
222 | f_va_callee(1, 2, 3, 4, (struct small){5, NULL}, 6, 7, 8, 9); |
223 | // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, fp128 0xL00000000000000004001800000000000, i32 7, i32 8, i32 9) |
224 | f_va_callee(1, 2, 3, 4, 5, 6.0L, 7, 8, 9); |
225 | // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, i128 {{%.*}}, i32 7, i32 8, i32 9) |
226 | f_va_callee(1, 2, 3, 4, 5, (struct small_aligned){6}, 7, 8, 9); |
227 | // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, [2 x i64] {{%.*}}, i32 signext 7, i32 8, i32 9) |
228 | f_va_callee(1, 2, 3, 4, 5, (struct small){6, NULL}, 7, 8, 9); |
229 | // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, i32 signext 6, fp128 0xL00000000000000004001C00000000000, i32 8, i32 9) |
230 | f_va_callee(1, 2, 3, 4, 5, 6, 7.0L, 8, 9); |
231 | // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, i32 signext 6, i128 {{%.*}}, i32 8, i32 9) |
232 | f_va_callee(1, 2, 3, 4, 5, 6, (struct small_aligned){7}, 8, 9); |
233 | // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, i32 signext 6, [2 x i64] {{.*}}, i32 8, i32 9) |
234 | f_va_callee(1, 2, 3, 4, 5, 6, (struct small){7, NULL}, 8, 9); |
235 | } |
236 | |
237 | // CHECK-LABEL: define signext i32 @f_va_1(i8* %fmt, ...) {{.*}} { |
238 | // CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 8 |
239 | // CHECK: [[VA:%.*]] = alloca i8*, align 8 |
240 | // CHECK: [[V:%.*]] = alloca i32, align 4 |
241 | // CHECK: store i8* %fmt, i8** [[FMT_ADDR]], align 8 |
242 | // CHECK: [[VA1:%.*]] = bitcast i8** [[VA]] to i8* |
243 | // CHECK: call void @llvm.va_start(i8* [[VA1]]) |
244 | // CHECK: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 8 |
245 | // CHECK: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i64 8 |
246 | // CHECK: store i8* [[ARGP_NEXT]], i8** [[VA]], align 8 |
247 | // CHECK: [[TMP0:%.*]] = bitcast i8* [[ARGP_CUR]] to i32* |
248 | // CHECK: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 8 |
249 | // CHECK: store i32 [[TMP1]], i32* [[V]], align 4 |
250 | // CHECK: [[VA2:%.*]] = bitcast i8** [[VA]] to i8* |
251 | // CHECK: call void @llvm.va_end(i8* [[VA2]]) |
252 | // CHECK: [[TMP2:%.*]] = load i32, i32* [[V]], align 4 |
253 | // CHECK: ret i32 [[TMP2]] |
254 | // CHECK: } |
255 | int f_va_1(char *fmt, ...) { |
256 | __builtin_va_list va; |
257 | |
258 | __builtin_va_start(va, fmt); |
259 | int v = __builtin_va_arg(va, int); |
260 | __builtin_va_end(va); |
261 | |
262 | return v; |
263 | } |
264 | |
265 | // An "aligned" register pair (where the first register is even-numbered) is |
266 | // used to pass varargs with 2x xlen alignment and 2x xlen size. Ensure the |
267 | // correct offsets are used. |
268 | |
269 | // CHECK-LABEL: @f_va_2( |
270 | // CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 8 |
271 | // CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 8 |
272 | // CHECK-NEXT: [[V:%.*]] = alloca fp128, align 16 |
273 | // CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 8 |
274 | // CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8* |
275 | // CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]]) |
276 | // CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 8 |
277 | // CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[ARGP_CUR]] to i64 |
278 | // CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 15 |
279 | // CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -16 |
280 | // CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = inttoptr i64 [[TMP2]] to i8* |
281 | // CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR_ALIGNED]], i64 16 |
282 | // CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 8 |
283 | // CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[ARGP_CUR_ALIGNED]] to fp128* |
284 | // CHECK-NEXT: [[TMP4:%.*]] = load fp128, fp128* [[TMP3]], align 16 |
285 | // CHECK-NEXT: store fp128 [[TMP4]], fp128* [[V]], align 16 |
286 | // CHECK-NEXT: [[VA2:%.*]] = bitcast i8** [[VA]] to i8* |
287 | // CHECK-NEXT: call void @llvm.va_end(i8* [[VA2]]) |
288 | // CHECK-NEXT: [[TMP5:%.*]] = load fp128, fp128* [[V]], align 16 |
289 | // CHECK-NEXT: ret fp128 [[TMP5]] |
290 | long double f_va_2(char *fmt, ...) { |
291 | __builtin_va_list va; |
292 | |
293 | __builtin_va_start(va, fmt); |
294 | long double v = __builtin_va_arg(va, long double); |
295 | __builtin_va_end(va); |
296 | |
297 | return v; |
298 | } |
299 | |
300 | // Two "aligned" register pairs. |
301 | |
302 | // CHECK-LABEL: @f_va_3( |
303 | // CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 8 |
304 | // CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 8 |
305 | // CHECK-NEXT: [[V:%.*]] = alloca fp128, align 16 |
306 | // CHECK-NEXT: [[W:%.*]] = alloca i32, align 4 |
307 | // CHECK-NEXT: [[X:%.*]] = alloca fp128, align 16 |
308 | // CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 8 |
309 | // CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8* |
310 | // CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]]) |
311 | // CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 8 |
312 | // CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[ARGP_CUR]] to i64 |
313 | // CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 15 |
314 | // CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -16 |
315 | // CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = inttoptr i64 [[TMP2]] to i8* |
316 | // CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR_ALIGNED]], i64 16 |
317 | // CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 8 |
318 | // CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[ARGP_CUR_ALIGNED]] to fp128* |
319 | // CHECK-NEXT: [[TMP4:%.*]] = load fp128, fp128* [[TMP3]], align 16 |
320 | // CHECK-NEXT: store fp128 [[TMP4]], fp128* [[V]], align 16 |
321 | // CHECK-NEXT: [[ARGP_CUR2:%.*]] = load i8*, i8** [[VA]], align 8 |
322 | // CHECK-NEXT: [[ARGP_NEXT3:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR2]], i64 8 |
323 | // CHECK-NEXT: store i8* [[ARGP_NEXT3]], i8** [[VA]], align 8 |
324 | // CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[ARGP_CUR2]] to i32* |
325 | // CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 8 |
326 | // CHECK-NEXT: store i32 [[TMP6]], i32* [[W]], align 4 |
327 | // CHECK-NEXT: [[ARGP_CUR4:%.*]] = load i8*, i8** [[VA]], align 8 |
328 | // CHECK-NEXT: [[TMP7:%.*]] = ptrtoint i8* [[ARGP_CUR4]] to i64 |
329 | // CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 15 |
330 | // CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -16 |
331 | // CHECK-NEXT: [[ARGP_CUR4_ALIGNED:%.*]] = inttoptr i64 [[TMP9]] to i8* |
332 | // CHECK-NEXT: [[ARGP_NEXT5:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR4_ALIGNED]], i64 16 |
333 | // CHECK-NEXT: store i8* [[ARGP_NEXT5]], i8** [[VA]], align 8 |
334 | // CHECK-NEXT: [[TMP10:%.*]] = bitcast i8* [[ARGP_CUR4_ALIGNED]] to fp128* |
335 | // CHECK-NEXT: [[TMP11:%.*]] = load fp128, fp128* [[TMP10]], align 16 |
336 | // CHECK-NEXT: store fp128 [[TMP11]], fp128* [[X]], align 16 |
337 | // CHECK-NEXT: [[VA6:%.*]] = bitcast i8** [[VA]] to i8* |
338 | // CHECK-NEXT: call void @llvm.va_end(i8* [[VA6]]) |
339 | // CHECK-NEXT: [[TMP12:%.*]] = load fp128, fp128* [[V]], align 16 |
340 | // CHECK-NEXT: [[TMP13:%.*]] = load fp128, fp128* [[X]], align 16 |
341 | // CHECK-NEXT: [[ADD:%.*]] = fadd fp128 [[TMP12]], [[TMP13]] |
342 | // CHECK-NEXT: ret fp128 [[ADD]] |
343 | long double f_va_3(char *fmt, ...) { |
344 | __builtin_va_list va; |
345 | |
346 | __builtin_va_start(va, fmt); |
347 | long double v = __builtin_va_arg(va, long double); |
348 | int w = __builtin_va_arg(va, int); |
349 | long double x = __builtin_va_arg(va, long double); |
350 | __builtin_va_end(va); |
351 | |
352 | return v + x; |
353 | } |
354 | |
355 | // CHECK-LABEL: @f_va_4( |
356 | // CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 8 |
357 | // CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 8 |
358 | // CHECK-NEXT: [[V:%.*]] = alloca i32, align 4 |
359 | // CHECK-NEXT: [[TS:%.*]] = alloca [[STRUCT_TINY:%.*]], align 2 |
360 | // CHECK-NEXT: [[SS:%.*]] = alloca [[STRUCT_SMALL:%.*]], align 8 |
361 | // CHECK-NEXT: [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 8 |
362 | // CHECK-NEXT: [[RET:%.*]] = alloca i32, align 4 |
363 | // CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 8 |
364 | // CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8* |
365 | // CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]]) |
366 | // CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 8 |
367 | // CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i64 8 |
368 | // CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 8 |
369 | // CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[ARGP_CUR]] to i32* |
370 | // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 8 |
371 | // CHECK-NEXT: store i32 [[TMP1]], i32* [[V]], align 4 |
372 | // CHECK-NEXT: [[ARGP_CUR2:%.*]] = load i8*, i8** [[VA]], align 8 |
373 | // CHECK-NEXT: [[ARGP_NEXT3:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR2]], i64 8 |
374 | // CHECK-NEXT: store i8* [[ARGP_NEXT3]], i8** [[VA]], align 8 |
375 | // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[ARGP_CUR2]] to %struct.tiny* |
376 | // CHECK-NEXT: [[TMP3:%.*]] = bitcast %struct.tiny* [[TS]] to i8* |
377 | // CHECK-NEXT: [[TMP4:%.*]] = bitcast %struct.tiny* [[TMP2]] to i8* |
378 | // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false) |
379 | // CHECK-NEXT: [[ARGP_CUR4:%.*]] = load i8*, i8** [[VA]], align 8 |
380 | // CHECK-NEXT: [[ARGP_NEXT5:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR4]], i64 16 |
381 | // CHECK-NEXT: store i8* [[ARGP_NEXT5]], i8** [[VA]], align 8 |
382 | // CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[ARGP_CUR4]] to %struct.small* |
383 | // CHECK-NEXT: [[TMP6:%.*]] = bitcast %struct.small* [[SS]] to i8* |
384 | // CHECK-NEXT: [[TMP7:%.*]] = bitcast %struct.small* [[TMP5]] to i8* |
385 | // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 16, i1 false) |
386 | // CHECK-NEXT: [[ARGP_CUR6:%.*]] = load i8*, i8** [[VA]], align 8 |
387 | // CHECK-NEXT: [[ARGP_NEXT7:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR6]], i64 8 |
388 | // CHECK-NEXT: store i8* [[ARGP_NEXT7]], i8** [[VA]], align 8 |
389 | // CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[ARGP_CUR6]] to %struct.large** |
390 | // CHECK-NEXT: [[TMP9:%.*]] = load %struct.large*, %struct.large** [[TMP8]], align 8 |
391 | // CHECK-NEXT: [[TMP10:%.*]] = bitcast %struct.large* [[LS]] to i8* |
392 | // CHECK-NEXT: [[TMP11:%.*]] = bitcast %struct.large* [[TMP9]] to i8* |
393 | // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP10]], i8* align 8 [[TMP11]], i64 32, i1 false) |
394 | // CHECK-NEXT: [[VA8:%.*]] = bitcast i8** [[VA]] to i8* |
395 | // CHECK-NEXT: call void @llvm.va_end(i8* [[VA8]]) |
396 | // CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_TINY]], %struct.tiny* [[TS]], i32 0, i32 0 |
397 | // CHECK-NEXT: [[TMP12:%.*]] = load i16, i16* [[A]], align 2 |
398 | // CHECK-NEXT: [[CONV:%.*]] = zext i16 [[TMP12]] to i64 |
399 | // CHECK-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], %struct.small* [[SS]], i32 0, i32 0 |
400 | // CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* [[A9]], align 8 |
401 | // CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[CONV]], [[TMP13]] |
402 | // CHECK-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], %struct.large* [[LS]], i32 0, i32 2 |
403 | // CHECK-NEXT: [[TMP14:%.*]] = load i64, i64* [[C]], align 8 |
404 | // CHECK-NEXT: [[ADD10:%.*]] = add nsw i64 [[ADD]], [[TMP14]] |
405 | // CHECK-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32 |
406 | // CHECK-NEXT: store i32 [[CONV11]], i32* [[RET]], align 4 |
407 | // CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[RET]], align 4 |
408 | // CHECK-NEXT: ret i32 [[TMP15]] |
409 | int f_va_4(char *fmt, ...) { |
410 | __builtin_va_list va; |
411 | |
412 | __builtin_va_start(va, fmt); |
413 | int v = __builtin_va_arg(va, int); |
414 | struct tiny ts = __builtin_va_arg(va, struct tiny); |
415 | struct small ss = __builtin_va_arg(va, struct small); |
416 | struct large ls = __builtin_va_arg(va, struct large); |
417 | __builtin_va_end(va); |
418 | |
419 | int ret = ts.a + ss.a + ls.c; |
420 | |
421 | return ret; |
422 | } |
423 | |