1 | // RUN: %clang_cc1 -triple arm64-apple-ios7 -target-abi darwinpcs -emit-llvm -o - %s | FileCheck %s |
2 | // RUN: %clang_cc1 -triple aarch64-linux-android -emit-llvm -o - %s | FileCheck -check-prefix=ANDROID %s |
3 | |
4 | #include <stdarg.h> |
5 | |
// Vector typedefs exercising the interesting AArch64 ABI size classes:
// <= 16 bytes (passed/coerced directly) and > 16 bytes (passed indirectly).
// Non-power-of-two element counts force padding/round-up in the coercion.
typedef __attribute__(( ext_vector_type(2) ))  char __char2;
typedef __attribute__(( ext_vector_type(3) ))  char __char3;
typedef __attribute__(( ext_vector_type(4) ))  char __char4;
typedef __attribute__(( ext_vector_type(5) ))  char __char5;
typedef __attribute__(( ext_vector_type(9) ))  char __char9;
typedef __attribute__(( ext_vector_type(19) )) char __char19;
typedef __attribute__(( ext_vector_type(3) ))  short __short3;
typedef __attribute__(( ext_vector_type(5) ))  short __short5;
typedef __attribute__(( ext_vector_type(3) ))  int __int3;
typedef __attribute__(( ext_vector_type(5) ))  int __int5;
typedef __attribute__(( ext_vector_type(3) ))  double __double3;
17 | |
18 | // Passing legal vector types as varargs. Check that we've allocated the appropriate size |
// <2 x i8> via va_arg: still consumes a full 8-byte varargs slot.
// Only the Android run verifies this case.
double varargs_vec_2c(int fixed, ...) {
// ANDROID: varargs_vec_2c
// ANDROID: [[VAR:%.*]] = alloca <2 x i8>, align 2
// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// ANDROID: bitcast i8* [[AP_CUR]] to <2 x i8>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char2 c3 = va_arg(ap, __char2);
  sum = sum + c3.x + c3.y;
  va_end(ap);
  return sum;
}

// Caller side: the <2 x i8> variadic argument is coerced to i16.
double test_2c(__char2 *in) {
// ANDROID: call double (i32, ...) @varargs_vec_2c(i32 3, i16 {{%.*}})
  return varargs_vec_2c(3, *in);
}
37 | |
// <3 x i8> via va_arg: rounds up to 4-byte alignment but still fits one
// 8-byte varargs slot, read directly from the current va_list pointer.
double varargs_vec_3c(int fixed, ...) {
// CHECK: varargs_vec_3c
// CHECK: alloca <3 x i8>, align 4
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i8>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char3 c3 = va_arg(ap, __char3);
  sum = sum + c3.x + c3.y;
  va_end(ap);
  return sum;
}

// Caller side: <3 x i8> is coerced to a single i32.
double test_3c(__char3 *in) {
// CHECK: test_3c
// CHECK: call double (i32, ...) @varargs_vec_3c(i32 3, i32 {{%.*}})
  return varargs_vec_3c(3, *in);
}
57 | |
// <4 x i8> via va_arg: exactly 4 bytes, one 8-byte varargs slot.
double varargs_vec_4c(int fixed, ...) {
// CHECK: varargs_vec_4c
// CHECK: alloca <4 x i8>, align 4
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <4 x i8>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char4 c4 = va_arg(ap, __char4);
  sum = sum + c4.x + c4.y;
  va_end(ap);
  return sum;
}

// Caller side: <4 x i8> is coerced to a single i32.
double test_4c(__char4 *in) {
// CHECK: test_4c
// CHECK: call double (i32, ...) @varargs_vec_4c(i32 4, i32 {{%.*}})
  return varargs_vec_4c(4, *in);
}
77 | |
// <5 x i8> via va_arg: rounds up to 8 bytes, one 8-byte varargs slot.
double varargs_vec_5c(int fixed, ...) {
// CHECK: varargs_vec_5c
// CHECK: alloca <5 x i8>, align 8
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <5 x i8>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char5 c5 = va_arg(ap, __char5);
  sum = sum + c5.x + c5.y;
  va_end(ap);
  return sum;
}

// Caller side: <5 x i8> is coerced to <2 x i32>.
double test_5c(__char5 *in) {
// CHECK: test_5c
// CHECK: call double (i32, ...) @varargs_vec_5c(i32 5, <2 x i32> {{%.*}})
  return varargs_vec_5c(5, *in);
}
97 | |
// <9 x i8> via va_arg: rounds up to 16 bytes, so the va_list pointer must
// first be realigned to 16 before the slot is read.
double varargs_vec_9c(int fixed, ...) {
// CHECK: varargs_vec_9c
// CHECK: alloca <9 x i8>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <9 x i8>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char9 c9 = va_arg(ap, __char9);
  sum = sum + c9.x + c9.y;
  va_end(ap);
  return sum;
}

// Caller side: <9 x i8> is coerced to <4 x i32>.
double test_9c(__char9 *in) {
// CHECK: test_9c
// CHECK: call double (i32, ...) @varargs_vec_9c(i32 9, <4 x i32> {{%.*}})
  return varargs_vec_9c(9, *in);
}
119 | |
// <19 x i8> exceeds 16 bytes, so it is passed indirectly: the 8-byte
// varargs slot holds a pointer to the vector, which is then loaded.
double varargs_vec_19c(int fixed, ...) {
// CHECK: varargs_vec_19c
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <19 x i8>**
// CHECK: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]]
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char19 c19 = va_arg(ap, __char19);
  sum = sum + c19.x + c19.y;
  va_end(ap);
  return sum;
}

// Caller side: the oversized vector is passed by pointer.
double test_19c(__char19 *in) {
// CHECK: test_19c
// CHECK: call double (i32, ...) @varargs_vec_19c(i32 19, <19 x i8>* {{%.*}})
  return varargs_vec_19c(19, *in);
}
139 | |
// <3 x i16> via va_arg: rounds up to 8 bytes, one 8-byte varargs slot.
double varargs_vec_3s(int fixed, ...) {
// CHECK: varargs_vec_3s
// CHECK: alloca <3 x i16>, align 8
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i16>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __short3 c3 = va_arg(ap, __short3);
  sum = sum + c3.x + c3.y;
  va_end(ap);
  return sum;
}

// Caller side: <3 x i16> is coerced to <2 x i32>.
double test_3s(__short3 *in) {
// CHECK: test_3s
// CHECK: call double (i32, ...) @varargs_vec_3s(i32 3, <2 x i32> {{%.*}})
  return varargs_vec_3s(3, *in);
}
159 | |
// <5 x i16> via va_arg: rounds up to 16 bytes; va_list pointer is
// realigned to 16 before the slot is read.
double varargs_vec_5s(int fixed, ...) {
// CHECK: varargs_vec_5s
// CHECK: alloca <5 x i16>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i16>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __short5 c5 = va_arg(ap, __short5);
  sum = sum + c5.x + c5.y;
  va_end(ap);
  return sum;
}

// Caller side: <5 x i16> is coerced to <4 x i32>.
double test_5s(__short5 *in) {
// CHECK: test_5s
// CHECK: call double (i32, ...) @varargs_vec_5s(i32 5, <4 x i32> {{%.*}})
  return varargs_vec_5s(5, *in);
}
181 | |
// <3 x i32> via va_arg: rounds up to 16 bytes; va_list pointer is
// realigned to 16 before the slot is read.
double varargs_vec_3i(int fixed, ...) {
// CHECK: varargs_vec_3i
// CHECK: alloca <3 x i32>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i32>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __int3 c3 = va_arg(ap, __int3);
  sum = sum + c3.x + c3.y;
  va_end(ap);
  return sum;
}

// Caller side: <3 x i32> is coerced to <4 x i32>.
double test_3i(__int3 *in) {
// CHECK: test_3i
// CHECK: call double (i32, ...) @varargs_vec_3i(i32 3, <4 x i32> {{%.*}})
  return varargs_vec_3i(3, *in);
}
203 | |
// <5 x i32> exceeds 16 bytes, so it is passed indirectly: the 8-byte
// varargs slot holds a pointer to the vector.
double varargs_vec_5i(int fixed, ...) {
// CHECK: varargs_vec_5i
// CHECK: alloca <5 x i32>, align 16
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <5 x i32>**
// CHECK: [[VAR2:%.*]] = load <5 x i32>*, <5 x i32>** [[VAR]]
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __int5 c5 = va_arg(ap, __int5);
  sum = sum + c5.x + c5.y;
  va_end(ap);
  return sum;
}

// Caller side: the oversized vector is passed by pointer.
double test_5i(__int5 *in) {
// CHECK: test_5i
// CHECK: call double (i32, ...) @varargs_vec_5i(i32 5, <5 x i32>* {{%.*}})
  return varargs_vec_5i(5, *in);
}
224 | |
// <3 x double> (24 bytes, > 16) is passed indirectly: the 8-byte varargs
// slot holds a pointer to the vector.
double varargs_vec_3d(int fixed, ...) {
// CHECK: varargs_vec_3d
// CHECK: alloca <3 x double>, align 16
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <3 x double>**
// CHECK: [[VAR2:%.*]] = load <3 x double>*, <3 x double>** [[VAR]]
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __double3 c3 = va_arg(ap, __double3);
  sum = sum + c3.x + c3.y;
  va_end(ap);
  return sum;
}

// Caller side: the oversized vector is passed by pointer.
double test_3d(__double3 *in) {
// CHECK: test_3d
// CHECK: call double (i32, ...) @varargs_vec_3d(i32 3, <3 x double>* {{%.*}})
  return varargs_vec_3d(3, *in);
}
245 | |
// Mixed sequence: pulls every vector size class off a single va_list in
// order, verifying each size class's slot handling (direct 8-byte slot,
// 16-byte realigned slot, or indirect-by-pointer) interleave correctly.
double varargs_vec(int fixed, ...) {
// CHECK: varargs_vec
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  // <3 x i8>: direct, one 8-byte slot.
  __char3 c3 = va_arg(ap, __char3);
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i8>*
  sum = sum + c3.x + c3.y;
  // <5 x i8>: direct, one 8-byte slot.
  __char5 c5 = va_arg(ap, __char5);
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <5 x i8>*
  sum = sum + c5.x + c5.y;
  // <9 x i8>: direct, 16-byte realigned slot.
  __char9 c9 = va_arg(ap, __char9);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <9 x i8>*
  sum = sum + c9.x + c9.y;
  // <19 x i8>: indirect, slot holds a pointer.
  __char19 c19 = va_arg(ap, __char19);
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <19 x i8>**
// CHECK: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]]
  sum = sum + c19.x + c19.y;
  // <3 x i16>: direct, one 8-byte slot.
  __short3 s3 = va_arg(ap, __short3);
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i16>*
  sum = sum + s3.x + s3.y;
  // <5 x i16>: direct, 16-byte realigned slot.
  __short5 s5 = va_arg(ap, __short5);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i16>*
  sum = sum + s5.x + s5.y;
  // <3 x i32>: direct, 16-byte realigned slot.
  __int3 i3 = va_arg(ap, __int3);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i32>*
  sum = sum + i3.x + i3.y;
  // <5 x i32>: indirect, slot holds a pointer.
  __int5 i5 = va_arg(ap, __int5);
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <5 x i32>**
// CHECK: [[VAR2:%.*]] = load <5 x i32>*, <5 x i32>** [[VAR]]
  sum = sum + i5.x + i5.y;
  // <3 x double>: indirect, slot holds a pointer.
  __double3 d3 = va_arg(ap, __double3);
// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <3 x double>**
// CHECK: [[VAR2:%.*]] = load <3 x double>*, <3 x double>** [[VAR]]
  sum = sum + d3.x + d3.y;
  va_end(ap);
  return sum;
}

// Caller side for the mixed sequence: each argument's coercion at the
// call site mirrors the per-type tests above.
double test(__char3 *c3, __char5 *c5, __char9 *c9, __char19 *c19,
            __short3 *s3, __short5 *s5, __int3 *i3, __int5 *i5,
            __double3 *d3) {
  double ret = varargs_vec(3, *c3, *c5, *c9, *c19, *s3, *s5, *i3, *i5, *d3);
// CHECK: call double (i32, ...) @varargs_vec(i32 3, i32 {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <19 x i8>* {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, <5 x i32>* {{%.*}}, <3 x double>* {{%.*}})
  return ret;
}
307 | |
// Fixed (named) <3 x i8> parameter: received as i32 and stored into a
// <3 x i8> alloca in the callee. noinline keeps the call visible.
__attribute__((noinline)) double args_vec_3c(int fixed, __char3 c3) {
// CHECK: args_vec_3c
// CHECK: [[C3:%.*]] = alloca <3 x i8>, align 4
// CHECK: [[TMP:%.*]] = bitcast <3 x i8>* [[C3]] to i32*
// CHECK: store i32 {{%.*}}, i32* [[TMP]]
  double sum = fixed;
  sum = sum + c3.x + c3.y;
  return sum;
}

// Caller side: <3 x i8> fixed argument coerced to i32.
double fixed_3c(__char3 *in) {
// CHECK: fixed_3c
// CHECK: call double @args_vec_3c(i32 3, i32 {{%.*}})
  return args_vec_3c(3, *in);
}
323 | |
// Fixed <5 x i8> parameter: received as <2 x i32> and stored into a
// <5 x i8> alloca in the callee.
__attribute__((noinline)) double args_vec_5c(int fixed, __char5 c5) {
// CHECK: args_vec_5c
// CHECK: [[C5:%.*]] = alloca <5 x i8>, align 8
// CHECK: [[TMP:%.*]] = bitcast <5 x i8>* [[C5]] to <2 x i32>*
// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 8
  double sum = fixed;
  sum = sum + c5.x + c5.y;
  return sum;
}

// Caller side: <5 x i8> fixed argument coerced to <2 x i32>.
double fixed_5c(__char5 *in) {
// CHECK: fixed_5c
// CHECK: call double @args_vec_5c(i32 5, <2 x i32> {{%.*}})
  return args_vec_5c(5, *in);
}
339 | |
// Fixed <9 x i8> parameter: received as <4 x i32> and stored into a
// <9 x i8> alloca in the callee.
__attribute__((noinline)) double args_vec_9c(int fixed, __char9 c9) {
// CHECK: args_vec_9c
// CHECK: [[C9:%.*]] = alloca <9 x i8>, align 16
// CHECK: [[TMP:%.*]] = bitcast <9 x i8>* [[C9]] to <4 x i32>*
// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 16
  double sum = fixed;
  sum = sum + c9.x + c9.y;
  return sum;
}

// Caller side: <9 x i8> fixed argument coerced to <4 x i32>.
double fixed_9c(__char9 *in) {
// CHECK: fixed_9c
// CHECK: call double @args_vec_9c(i32 9, <4 x i32> {{%.*}})
  return args_vec_9c(9, *in);
}
355 | |
// Fixed <19 x i8> parameter (> 16 bytes): passed indirectly, so the
// callee loads the vector through the incoming pointer.
__attribute__((noinline)) double args_vec_19c(int fixed, __char19 c19) {
// CHECK: args_vec_19c
// CHECK: [[C19:%.*]] = load <19 x i8>, <19 x i8>* {{.*}}, align 16
  double sum = fixed;
  sum = sum + c19.x + c19.y;
  return sum;
}

// Caller side: oversized fixed vector argument passed by pointer.
double fixed_19c(__char19 *in) {
// CHECK: fixed_19c
// CHECK: call double @args_vec_19c(i32 19, <19 x i8>* {{%.*}})
  return args_vec_19c(19, *in);
}
369 | |
// Fixed <3 x i16> parameter: received as <2 x i32> and stored into a
// <3 x i16> alloca in the callee.
__attribute__((noinline)) double args_vec_3s(int fixed, __short3 c3) {
// CHECK: args_vec_3s
// CHECK: [[C3:%.*]] = alloca <3 x i16>, align 8
// CHECK: [[TMP:%.*]] = bitcast <3 x i16>* [[C3]] to <2 x i32>*
// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 8
  double sum = fixed;
  sum = sum + c3.x + c3.y;
  return sum;
}

// Caller side: <3 x i16> fixed argument coerced to <2 x i32>.
double fixed_3s(__short3 *in) {
// CHECK: fixed_3s
// CHECK: call double @args_vec_3s(i32 3, <2 x i32> {{%.*}})
  return args_vec_3s(3, *in);
}
385 | |
// Fixed <5 x i16> parameter: received as <4 x i32> and stored into a
// <5 x i16> alloca in the callee.
__attribute__((noinline)) double args_vec_5s(int fixed, __short5 c5) {
// CHECK: args_vec_5s
// CHECK: [[C5:%.*]] = alloca <5 x i16>, align 16
// CHECK: [[TMP:%.*]] = bitcast <5 x i16>* [[C5]] to <4 x i32>*
// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 16
  double sum = fixed;
  sum = sum + c5.x + c5.y;
  return sum;
}

// Caller side: <5 x i16> fixed argument coerced to <4 x i32>.
double fixed_5s(__short5 *in) {
// CHECK: fixed_5s
// CHECK: call double @args_vec_5s(i32 5, <4 x i32> {{%.*}})
  return args_vec_5s(5, *in);
}
401 | |
// Fixed <3 x i32> parameter: received as <4 x i32> and stored into a
// <3 x i32> alloca in the callee.
__attribute__((noinline)) double args_vec_3i(int fixed, __int3 c3) {
// CHECK: args_vec_3i
// CHECK: [[C3:%.*]] = alloca <3 x i32>, align 16
// CHECK: [[TMP:%.*]] = bitcast <3 x i32>* [[C3]] to <4 x i32>*
// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 16
  double sum = fixed;
  sum = sum + c3.x + c3.y;
  return sum;
}

// Caller side: <3 x i32> fixed argument coerced to <4 x i32>.
double fixed_3i(__int3 *in) {
// CHECK: fixed_3i
// CHECK: call double @args_vec_3i(i32 3, <4 x i32> {{%.*}})
  return args_vec_3i(3, *in);
}
417 | |
// Fixed <5 x i32> parameter (> 16 bytes): passed indirectly; callee
// loads the vector through the incoming pointer.
__attribute__((noinline)) double args_vec_5i(int fixed, __int5 c5) {
// CHECK: args_vec_5i
// CHECK: [[C5:%.*]] = load <5 x i32>, <5 x i32>* {{%.*}}, align 16
  double sum = fixed;
  sum = sum + c5.x + c5.y;
  return sum;
}

// Caller side: oversized fixed vector argument passed by pointer.
double fixed_5i(__int5 *in) {
// CHECK: fixed_5i
// CHECK: call double @args_vec_5i(i32 5, <5 x i32>* {{%.*}})
  return args_vec_5i(5, *in);
}
431 | |
// Fixed <3 x double> parameter (> 16 bytes): passed indirectly; callee
// loads a padded <4 x double> and shuffles back down to 3 elements.
__attribute__((noinline)) double args_vec_3d(int fixed, __double3 c3) {
// CHECK: args_vec_3d
// CHECK: [[CAST:%.*]] = bitcast <3 x double>* {{%.*}} to <4 x double>*
// CHECK: [[LOAD:%.*]] = load <4 x double>, <4 x double>* [[CAST]]
// CHECK: shufflevector <4 x double> [[LOAD]], <4 x double> undef, <3 x i32> <i32 0, i32 1, i32 2>
  double sum = fixed;
  sum = sum + c3.x + c3.y;
  return sum;
}

// Caller side: oversized fixed vector argument passed by pointer.
double fixed_3d(__double3 *in) {
// CHECK: fixed_3d
// CHECK: call double @args_vec_3d(i32 3, <3 x double>* {{%.*}})
  return args_vec_3d(3, *in);
}
447 | |