1 | // REQUIRES: powerpc-registered-target |
2 | // RUN: %clang_cc1 -target-feature +altivec -target-feature +power8-vector -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s |
3 | // RUN: %clang_cc1 -target-feature +altivec -target-feature +power8-vector -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE |
4 | // RUN: not %clang_cc1 -target-feature +altivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - 2>&1 | FileCheck %s -check-prefix=CHECK-PPC |
5 | // Added -target-feature +vsx above to avoid errors about "vector double" and to |
6 | // generate the correct errors for functions that are only overloaded with VSX |
7 | // (vec_cmpge, vec_cmple). Without this option, there is only one overload so |
8 | // it is selected. |
9 | #include <altivec.h> |
10 | |
11 | void dummy() { } |
12 | signed int si; |
13 | signed long long sll; |
14 | unsigned long long ull; |
15 | signed __int128 sx; |
16 | unsigned __int128 ux; |
17 | double d; |
18 | vector signed char vsc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 }; |
19 | vector unsigned char vuc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 }; |
20 | vector bool char vbc = { 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1 }; |
21 | |
22 | vector signed short vss = { 0, 1, 2, 3, 4, 5, 6, 7 }; |
23 | vector unsigned short vus = { 0, 1, 2, 3, 4, 5, 6, 7 }; |
24 | vector bool short vbs = { 1, 1, 0, 0, 0, 0, 1, 1 }; |
25 | |
26 | vector signed int vsi = { -1, 2, -3, 4 }; |
27 | vector unsigned int vui = { 1, 2, 3, 4 }; |
28 | vector bool int vbi = {0, -1, -1, 0}; |
29 | |
30 | vector signed long long vsll = { 1, 2 }; |
31 | vector unsigned long long vull = { 1, 2 }; |
32 | vector bool long long vbll = { 1, 0 }; |
33 | |
34 | vector signed __int128 vsx = { 1 }; |
35 | vector unsigned __int128 vux = { 1 }; |
36 | |
37 | vector float vfa = { 1.e-4f, -132.23f, -22.1, 32.00f }; |
38 | vector double vda = { 1.e-11, -132.23e10 }; |
39 | |
40 | int res_i; |
41 | double res_d; |
42 | signed long long res_sll; |
43 | unsigned long long res_ull; |
44 | |
45 | vector signed char res_vsc; |
46 | vector unsigned char res_vuc; |
47 | vector bool char res_vbc; |
48 | |
49 | vector signed short res_vss; |
50 | vector unsigned short res_vus; |
51 | vector bool short res_vbs; |
52 | |
53 | vector signed int res_vsi; |
54 | vector unsigned int res_vui; |
55 | vector bool int res_vbi; |
56 | |
57 | vector signed long long res_vsll; |
58 | vector unsigned long long res_vull; |
59 | vector bool long long res_vbll; |
60 | |
61 | vector signed __int128 res_vsx; |
62 | vector unsigned __int128 res_vux; |
63 | |
64 | vector float res_vf; |
65 | vector double res_vd; |
66 | |
67 | // CHECK-LABEL: define void @test1 |
68 | void test1() { |
69 | |
70 | /* vec_abs */ |
71 | res_vsll = vec_abs(vsll); |
72 | // CHECK: call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %{{[0-9]*}}, <2 x i64> |
73 | // CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %{{[0-9]*}}, <2 x i64> |
74 | // CHECK-PPC: error: call to 'vec_abs' is ambiguous |
75 | |
76 | /* vec_add */ |
77 | res_vsll = vec_add(vsll, vsll); |
78 | // CHECK: add <2 x i64> |
79 | // CHECK-LE: add <2 x i64> |
80 | // CHECK-PPC: error: call to 'vec_add' is ambiguous |
81 | |
82 | res_vull = vec_add(vull, vull); |
83 | // CHECK: add <2 x i64> |
84 | // CHECK-LE: add <2 x i64> |
85 | // CHECK-PPC: error: call to 'vec_add' is ambiguous |
86 | |
87 | /* vec_addc */ |
88 | res_vsi = vec_addc(vsi, vsi); |
89 | // CHECK: @llvm.ppc.altivec.vaddcuw |
90 | // CHECK-LE: @llvm.ppc.altivec.vaddcuw |
91 | |
92 | res_vui = vec_addc(vui, vui); |
93 | // CHECK: @llvm.ppc.altivec.vaddcuw |
94 | // CHECK-LE: @llvm.ppc.altivec.vaddcuw |
95 | |
96 | res_vsx = vec_addc(vsx, vsx); |
97 | // CHECK: @llvm.ppc.altivec.vaddcuq |
98 | // CHECK-LE: @llvm.ppc.altivec.vaddcuq |
99 | |
100 | res_vux = vec_addc(vux, vux); |
101 | // CHECK: @llvm.ppc.altivec.vaddcuq |
102 | // CHECK-LE: @llvm.ppc.altivec.vaddcuq |
103 | |
104 | /* vec_adde */ |
105 | res_vsx = vec_adde(vsx, vsx, vsx); |
106 | // CHECK: @llvm.ppc.altivec.vaddeuqm |
107 | // CHECK-LE: @llvm.ppc.altivec.vaddeuqm |
108 | |
109 | res_vux = vec_adde(vux, vux, vux); |
110 | // CHECK: @llvm.ppc.altivec.vaddeuqm |
111 | // CHECK-LE: @llvm.ppc.altivec.vaddeuqm |
112 | |
113 | /* vec_addec */ |
114 | res_vsx = vec_addec(vsx, vsx, vsx); |
115 | // CHECK: @llvm.ppc.altivec.vaddecuq |
116 | // CHECK-LE: @llvm.ppc.altivec.vaddecuq |
117 | |
118 | /* vec_mergee */ |
119 | res_vbi = vec_mergee(vbi, vbi); |
120 | // CHECK: @llvm.ppc.altivec.vperm |
121 | // CHECK-LE: @llvm.ppc.altivec.vperm |
122 | |
123 | res_vsi = vec_mergee(vsi, vsi); |
124 | // CHECK: @llvm.ppc.altivec.vperm |
125 | // CHECK-LE: @llvm.ppc.altivec.vperm |
126 | |
127 | res_vui = vec_mergee(vui, vui); |
128 | // CHECK: @llvm.ppc.altivec.vperm |
129 | // CHECK-LE: @llvm.ppc.altivec.vperm |
130 | // CHECK-PPC: warning: implicit declaration of function 'vec_mergee' |
131 | |
132 | res_vbll = vec_mergee(vbll, vbll); |
133 | // CHECK: @llvm.ppc.altivec.vperm |
134 | // CHECK-LE: @llvm.ppc.altivec.vperm |
135 | |
136 | res_vsll = vec_mergee(vsll, vsll); |
137 | // CHECK: @llvm.ppc.altivec.vperm |
138 | // CHECK-LE: @llvm.ppc.altivec.vperm |
139 | |
140 | res_vull = vec_mergee(vull, vull); |
141 | // CHECK: @llvm.ppc.altivec.vperm |
142 | // CHECK-LE: @llvm.ppc.altivec.vperm |
143 | |
144 | res_vf = vec_mergee(vfa, vfa); |
145 | // CHECK: @llvm.ppc.altivec.vperm |
146 | // CHECK-LE: @llvm.ppc.altivec.vperm |
147 | |
148 | res_vd = vec_mergee(vda, vda); |
149 | // CHECK: @llvm.ppc.altivec.vperm |
150 | // CHECK-LE: @llvm.ppc.altivec.vperm |
151 | |
152 | /* vec_mergeo */ |
153 | res_vbi = vec_mergeo(vbi, vbi); |
154 | // CHECK: @llvm.ppc.altivec.vperm |
155 | // CHECK-LE: @llvm.ppc.altivec.vperm |
156 | |
157 | res_vsi = vec_mergeo(vsi, vsi); |
158 | // CHECK: @llvm.ppc.altivec.vperm |
159 | // CHECK-LE: @llvm.ppc.altivec.vperm |
160 | |
161 | res_vui = vec_mergeo(vui, vui); |
162 | // CHECK: @llvm.ppc.altivec.vperm |
163 | // CHECK-LE: @llvm.ppc.altivec.vperm |
164 | // CHECK-PPC: warning: implicit declaration of function 'vec_mergeo' |
165 | |
166 | /* vec_cmpeq */ |
167 | res_vbll = vec_cmpeq(vbll, vbll); |
168 | // CHECK: @llvm.ppc.altivec.vcmpequd |
169 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd |
170 | // CHECK-PPC: error: call to 'vec_cmpeq' is ambiguous |
171 | |
172 | res_vbll = vec_cmpeq(vsll, vsll); |
173 | // CHECK: @llvm.ppc.altivec.vcmpequd |
174 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd |
175 | // CHECK-PPC: error: call to 'vec_cmpeq' is ambiguous |
176 | |
177 | res_vbll = vec_cmpeq(vull, vull); |
178 | // CHECK: @llvm.ppc.altivec.vcmpequd |
179 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd |
180 | // CHECK-PPC: error: call to 'vec_cmpeq' is ambiguous |
181 | |
182 | /* vec_cmpge */ |
183 | res_vbll = vec_cmpge(vsll, vsll); |
184 | // CHECK: @llvm.ppc.altivec.vcmpgtsd |
185 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd |
186 | // CHECK-PPC: error: call to 'vec_cmpge' is ambiguous |
187 | |
188 | res_vbll = vec_cmpge(vull, vull); |
189 | // CHECK: @llvm.ppc.altivec.vcmpgtud |
190 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud |
191 | // CHECK-PPC: error: call to 'vec_cmpge' is ambiguous |
192 | |
193 | /* vec_cmple */ |
194 | res_vbll = vec_cmple(vsll, vsll); |
195 | // CHECK: @llvm.ppc.altivec.vcmpgtsd |
196 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd |
197 | // CHECK-PPC: error: call to 'vec_cmple' is ambiguous |
198 | |
199 | res_vbll = vec_cmple(vull, vull); |
200 | // CHECK: @llvm.ppc.altivec.vcmpgtud |
201 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud |
202 | // CHECK-PPC: error: call to 'vec_cmple' is ambiguous |
203 | |
204 | /* vec_cmpgt */ |
205 | res_vbll = vec_cmpgt(vsll, vsll); |
206 | // CHECK: @llvm.ppc.altivec.vcmpgtsd |
207 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd |
208 | // CHECK-PPC: error: call to 'vec_cmpgt' is ambiguous |
209 | |
210 | res_vbll = vec_cmpgt(vull, vull); |
211 | // CHECK: @llvm.ppc.altivec.vcmpgtud |
212 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud |
213 | // CHECK-PPC: error: call to 'vec_cmpgt' is ambiguous |
214 | |
215 | /* vec_cmplt */ |
216 | res_vbll = vec_cmplt(vsll, vsll); |
217 | // CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}}) |
218 | // CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}}) |
219 | // CHECK-PPC: error: call to 'vec_cmplt' is ambiguous |
220 | |
221 | res_vbll = vec_cmplt(vull, vull); |
222 | // CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}}) |
223 | // CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}}) |
224 | // CHECK-PPC: error: call to 'vec_cmplt' is ambiguous |
225 | |
226 | /* vec_eqv */ |
227 | res_vsc = vec_eqv(vsc, vsc); |
228 | // CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
229 | // CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
230 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
231 | // CHECK: bitcast <4 x i32> [[T3]] to <16 x i8> |
232 | // CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
233 | // CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
234 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
235 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8> |
236 | // CHECK-PPC: error: assigning to |
237 | |
238 | res_vsc = vec_eqv(vbc, vbc); |
239 | // CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
240 | // CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
241 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
242 | // CHECK: bitcast <4 x i32> [[T3]] to <16 x i8> |
243 | // CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
244 | // CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
245 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
246 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8> |
247 | // CHECK-PPC: error: assigning to |
248 | |
249 | res_vuc = vec_eqv(vuc, vuc); |
250 | // CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
251 | // CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
252 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
253 | // CHECK: bitcast <4 x i32> [[T3]] to <16 x i8> |
254 | // CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
255 | // CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> |
256 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
257 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8> |
258 | // CHECK-PPC: error: assigning to |
259 | |
260 | res_vss = vec_eqv(vss, vss); |
261 | // CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
262 | // CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
263 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
264 | // CHECK: bitcast <4 x i32> [[T3]] to <8 x i16> |
265 | // CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
266 | // CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
267 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
268 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16> |
269 | // CHECK-PPC: error: assigning to |
270 | |
271 | res_vss = vec_eqv(vbs, vbs); |
272 | // CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
273 | // CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
274 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
275 | // CHECK: bitcast <4 x i32> [[T3]] to <8 x i16> |
276 | // CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
277 | // CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
278 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
279 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16> |
280 | // CHECK-PPC: error: assigning to |
281 | |
282 | res_vus = vec_eqv(vus, vus); |
283 | // CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
284 | // CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
285 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
286 | // CHECK: bitcast <4 x i32> [[T3]] to <8 x i16> |
287 | // CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
288 | // CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
289 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
290 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16> |
291 | // CHECK-PPC: error: assigning to |
292 | |
293 | res_vsi = vec_eqv(vsi, vsi); |
294 | // CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) |
295 | // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) |
296 | // CHECK-PPC: error: assigning to |
297 | |
298 | res_vsi = vec_eqv(vbi, vbi); |
299 | // CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) |
300 | // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) |
301 | // CHECK-PPC: error: assigning to |
302 | |
303 | res_vui = vec_eqv(vui, vui); |
304 | // CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) |
305 | // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) |
306 | // CHECK-PPC: error: assigning to |
307 | |
308 | res_vsll = vec_eqv(vsll, vsll); |
309 | // CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
310 | // CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
311 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
312 | // CHECK: bitcast <4 x i32> [[T3]] to <2 x i64> |
313 | // CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
314 | // CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
315 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
316 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64> |
317 | // CHECK-PPC: error: assigning to |
318 | |
319 | res_vsll = vec_eqv(vbll, vbll); |
320 | // CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
321 | // CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
322 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
323 | // CHECK: bitcast <4 x i32> [[T3]] to <2 x i64> |
324 | // CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
325 | // CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
326 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
327 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64> |
328 | // CHECK-PPC: error: assigning to |
329 | |
330 | res_vull = vec_eqv(vull, vull); |
331 | // CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
332 | // CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
333 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
334 | // CHECK: bitcast <4 x i32> [[T3]] to <2 x i64> |
335 | // CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
336 | // CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> |
337 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
338 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64> |
339 | // CHECK-PPC: error: assigning to |
340 | |
341 | res_vf = vec_eqv(vfa, vfa); |
342 | // CHECK: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32> |
343 | // CHECK: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32> |
344 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
345 | // CHECK: bitcast <4 x i32> [[T3]] to <4 x float> |
346 | // CHECK-LE: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32> |
347 | // CHECK-LE: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32> |
348 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
349 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float> |
350 | // CHECK-PPC: error: assigning to |
351 | |
352 | res_vd = vec_eqv(vda, vda); |
353 | // CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32> |
354 | // CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32> |
355 | // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
356 | // CHECK: bitcast <4 x i32> [[T3]] to <2 x double> |
357 | // CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32> |
358 | // CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32> |
359 | // CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) |
360 | // CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double> |
361 | // CHECK-PPC: error: assigning to |
362 | |
363 | /* vec_extract */ |
364 | res_sll = vec_extract(vsll, si); |
365 | // CHECK: extractelement <2 x i64> |
366 | // CHECK-LE: extractelement <2 x i64> |
367 | |
368 | res_ull = vec_extract(vull, si); |
369 | // CHECK: extractelement <2 x i64> |
370 | // CHECK-LE: extractelement <2 x i64> |
371 | |
372 | res_ull = vec_extract(vbll, si); |
373 | // CHECK: extractelement <2 x i64> |
374 | // CHECK-LE: extractelement <2 x i64> |
375 | |
376 | res_d = vec_extract(vda, si); |
377 | // CHECK: extractelement <2 x double> |
378 | // CHECK-LE: extractelement <2 x double> |
379 | |
380 | /* vec_insert */ |
381 | res_vsll = vec_insert(sll, vsll, si); |
382 | // CHECK: insertelement <2 x i64> |
383 | // CHECK-LE: insertelement <2 x i64> |
384 | |
385 | res_vbll = vec_insert(ull, vbll, si); |
386 | // CHECK: insertelement <2 x i64> |
387 | // CHECK-LE: insertelement <2 x i64> |
388 | |
389 | res_vull = vec_insert(ull, vull, si); |
390 | // CHECK: insertelement <2 x i64> |
391 | // CHECK-LE: insertelement <2 x i64> |
392 | |
393 | res_vd = vec_insert(d, vda, si); |
394 | // CHECK: insertelement <2 x double> |
395 | // CHECK-LE: insertelement <2 x double> |
396 | |
397 | /* vec_cntlz */ |
398 | res_vsc = vec_cntlz(vsc); |
399 | // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false) |
400 | // CHECK-LE: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false) |
401 | // CHECK-PPC: warning: implicit declaration of function 'vec_cntlz' is invalid in C99 |
402 | |
403 | res_vuc = vec_cntlz(vuc); |
404 | // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false) |
405 | // CHECK-LE: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false) |
406 | |
407 | res_vss = vec_cntlz(vss); |
408 | // CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false) |
409 | // CHECK-LE: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false) |
410 | |
411 | res_vus = vec_cntlz(vus); |
412 | // CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false) |
413 | // CHECK-LE: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false) |
414 | |
415 | res_vsi = vec_cntlz(vsi); |
416 | // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false) |
417 | // CHECK-LE: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false) |
418 | |
419 | res_vui = vec_cntlz(vui); |
420 | // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false) |
421 | // CHECK-LE: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false) |
422 | |
423 | res_vsll = vec_cntlz(vsll); |
424 | // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false) |
425 | // CHECK-LE: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false) |
426 | |
427 | res_vull = vec_cntlz(vull); |
428 | // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false) |
429 | // CHECK-LE: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false) |
430 | |
431 | /* ----------------------- predicates --------------------------- */ |
432 | /* vec_all_eq */ |
433 | res_i = vec_all_eq(vsll, vsll); |
434 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
435 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
436 | // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous |
437 | |
438 | res_i = vec_all_eq(vsll, vbll); |
439 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
440 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
441 | // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous |
442 | |
443 | res_i = vec_all_eq(vull, vull); |
444 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
445 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
446 | // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous |
447 | |
448 | res_i = vec_all_eq(vull, vbll); |
449 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
450 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
451 | // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous |
452 | |
453 | res_i = vec_all_eq(vbll, vsll); |
454 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
455 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
456 | // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous |
457 | |
458 | res_i = vec_all_eq(vbll, vull); |
459 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
460 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
461 | // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous |
462 | |
463 | res_i = vec_all_eq(vbll, vbll); |
464 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
465 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
466 | // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous |
467 | |
468 | res_i = vec_all_eq(vda, vda); |
469 | // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p |
470 | // CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p |
471 | |
472 | /* vec_all_ne */ |
473 | res_i = vec_all_ne(vsll, vsll); |
474 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
475 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
476 | // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous |
477 | |
478 | res_i = vec_all_ne(vsll, vbll); |
479 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
480 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
481 | // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous |
482 | |
483 | res_i = vec_all_ne(vull, vull); |
484 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
485 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
486 | // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous |
487 | |
488 | res_i = vec_all_ne(vull, vbll); |
489 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
490 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
491 | // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous |
492 | |
493 | res_i = vec_all_ne(vbll, vsll); |
494 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
495 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
496 | // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous |
497 | |
498 | res_i = vec_all_ne(vbll, vull); |
499 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
500 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
501 | // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous |
502 | |
503 | res_i = vec_all_ne(vbll, vbll); |
504 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
505 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
506 | // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous |
507 | |
508 | dummy(); |
509 | // CHECK: @dummy |
510 | |
511 | res_i = vec_all_ne(vda, vda); |
512 | // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p |
513 | // CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p |
514 | |
515 | dummy(); |
516 | // CHECK: @dummy |
517 | |
518 | res_i = vec_all_nge(vda, vda); |
519 | // CHECK: @llvm.ppc.vsx.xvcmpgedp.p |
520 | // CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p |
521 | |
522 | res_i = vec_all_ngt(vda, vda); |
523 | // CHECK: @llvm.ppc.vsx.xvcmpgtdp.p |
524 | // CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p |
525 | |
526 | /* vec_any_eq */ |
527 | res_i = vec_any_eq(vsll, vsll); |
528 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
529 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
530 | // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous |
531 | |
532 | res_i = vec_any_eq(vsll, vbll); |
533 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
534 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
535 | // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous |
536 | |
537 | res_i = vec_any_eq(vull, vull); |
538 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
539 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
540 | // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous |
541 | |
542 | res_i = vec_any_eq(vull, vbll); |
543 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
544 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
545 | // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous |
546 | |
547 | res_i = vec_any_eq(vbll, vsll); |
548 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
549 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
550 | // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous |
551 | |
552 | res_i = vec_any_eq(vbll, vull); |
553 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
554 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
555 | // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous |
556 | |
557 | res_i = vec_any_eq(vbll, vbll); |
558 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
559 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
560 | // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous |
561 | |
562 | res_i = vec_any_eq(vda, vda); |
563 | // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p |
564 | // CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p |
565 | |
566 | /* vec_any_ne */ |
567 | res_i = vec_any_ne(vsll, vsll); |
568 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
569 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
570 | // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous |
571 | |
572 | res_i = vec_any_ne(vsll, vbll); |
573 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
574 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
575 | // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous |
576 | |
577 | res_i = vec_any_ne(vull, vull); |
578 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
579 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
580 | // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous |
581 | |
582 | res_i = vec_any_ne(vull, vbll); |
583 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
584 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
585 | // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous |
586 | |
587 | res_i = vec_any_ne(vbll, vsll); |
588 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
589 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
590 | // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous |
591 | |
592 | res_i = vec_any_ne(vbll, vull); |
593 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
594 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
595 | // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous |
596 | |
597 | res_i = vec_any_ne(vbll, vbll); |
598 | // CHECK: @llvm.ppc.altivec.vcmpequd.p |
599 | // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p |
600 | // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous |
601 | |
602 | res_i = vec_any_ne(vda, vda); |
603 | // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p |
604 | // CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p |
605 | |
606 | /* vec_all_ge */ |
607 | res_i = vec_all_ge(vsll, vsll); |
608 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
609 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
610 | // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous |
611 | |
612 | res_i = vec_all_ge(vsll, vbll); |
613 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
614 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
615 | // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous |
616 | |
617 | res_i = vec_all_ge(vull, vull); |
618 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
619 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
620 | // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous |
621 | |
622 | res_i = vec_all_ge(vull, vbll); |
623 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
624 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
625 | // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous |
626 | |
627 | res_i = vec_all_ge(vbll, vsll); |
628 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
629 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
630 | // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous |
631 | |
632 | res_i = vec_all_ge(vbll, vull); |
633 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
634 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
635 | // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous |
636 | |
637 | res_i = vec_all_ge(vbll, vbll); |
638 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
639 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
640 | // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous |
641 | |
642 | res_i = vec_all_ge(vda, vda); |
643 | // CHECK: @llvm.ppc.vsx.xvcmpgedp.p |
644 | // CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p |
645 | |
646 | /* vec_all_gt */ |
647 | res_i = vec_all_gt(vsll, vsll); |
648 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
649 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
650 | // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous |
651 | |
652 | res_i = vec_all_gt(vsll, vbll); |
653 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
654 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
655 | // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous |
656 | |
657 | res_i = vec_all_gt(vull, vull); |
658 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
659 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
660 | // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous |
661 | |
662 | res_i = vec_all_gt(vull, vbll); |
663 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
664 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
665 | // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous |
666 | |
667 | res_i = vec_all_gt(vbll, vsll); |
668 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
669 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
670 | // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous |
671 | |
672 | res_i = vec_all_gt(vbll, vull); |
673 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
674 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
675 | // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous |
676 | |
677 | res_i = vec_all_gt(vbll, vbll); |
678 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
679 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
680 | // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous |
681 | |
682 | res_i = vec_all_gt(vda, vda); |
683 | // CHECK: @llvm.ppc.vsx.xvcmpgtdp.p |
684 | // CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p |
685 | |
686 | /* vec_all_le */ |
687 | res_i = vec_all_le(vsll, vsll); |
688 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
689 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
690 | // CHECK-PPC: error: call to 'vec_all_le' is ambiguous |
691 | |
692 | res_i = vec_all_le(vsll, vbll); |
693 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
694 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
695 | // CHECK-PPC: error: call to 'vec_all_le' is ambiguous |
696 | |
697 | res_i = vec_all_le(vull, vull); |
698 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
699 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
700 | // CHECK-PPC: error: call to 'vec_all_le' is ambiguous |
701 | |
702 | res_i = vec_all_le(vull, vbll); |
703 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
704 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
705 | // CHECK-PPC: error: call to 'vec_all_le' is ambiguous |
706 | |
707 | res_i = vec_all_le(vbll, vsll); |
708 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
709 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
710 | // CHECK-PPC: error: call to 'vec_all_le' is ambiguous |
711 | |
712 | res_i = vec_all_le(vbll, vull); |
713 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
714 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
715 | // CHECK-PPC: error: call to 'vec_all_le' is ambiguous |
716 | |
717 | res_i = vec_all_le(vbll, vbll); |
718 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
719 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
720 | // CHECK-PPC: error: call to 'vec_all_le' is ambiguous |
721 | |
722 | res_i = vec_all_le(vda, vda); |
723 | // CHECK: @llvm.ppc.vsx.xvcmpgedp.p |
724 | // CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p |
725 | |
726 | /* vec_all_lt */ |
727 | res_i = vec_all_lt(vsll, vsll); |
728 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
729 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
730 | // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous |
731 | |
732 | res_i = vec_all_lt(vsll, vbll); |
733 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
734 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
735 | // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous |
736 | |
737 | res_i = vec_all_lt(vull, vull); |
738 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
739 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
740 | // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous |
741 | |
742 | res_i = vec_all_lt(vull, vbll); |
743 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
744 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
745 | // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous |
746 | |
747 | res_i = vec_all_lt(vbll, vsll); |
748 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
749 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
750 | // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous |
751 | |
752 | res_i = vec_all_lt(vbll, vull); |
753 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
754 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
755 | // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous |
756 | |
757 | res_i = vec_all_lt(vbll, vbll); |
758 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
759 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
760 | // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous |
761 | |
762 | res_i = vec_all_lt(vda, vda); |
763 | // CHECK: @llvm.ppc.vsx.xvcmpgtdp.p |
764 | // CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p |
765 | |
766 | res_i = vec_all_nan(vda); |
767 | // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p |
768 | // CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p |
769 | |
770 | /* vec_any_ge */ |
771 | res_i = vec_any_ge(vsll, vsll); |
772 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
773 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
774 | // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous |
775 | |
776 | res_i = vec_any_ge(vsll, vbll); |
777 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
778 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
779 | // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous |
780 | |
781 | res_i = vec_any_ge(vull, vull); |
782 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
783 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
784 | // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous |
785 | |
786 | res_i = vec_any_ge(vull, vbll); |
787 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
788 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
789 | // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous |
790 | |
791 | res_i = vec_any_ge(vbll, vsll); |
792 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
793 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
794 | // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous |
795 | |
796 | res_i = vec_any_ge(vbll, vull); |
797 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
798 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
799 | // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous |
800 | |
801 | res_i = vec_any_ge(vbll, vbll); |
802 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
803 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
804 | // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous |
805 | |
806 | res_i = vec_any_ge(vda, vda); |
807 | // CHECK: @llvm.ppc.vsx.xvcmpgedp.p |
808 | // CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p |
809 | |
810 | /* vec_any_gt */ |
811 | res_i = vec_any_gt(vsll, vsll); |
812 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
813 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
814 | // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous |
815 | |
816 | res_i = vec_any_gt(vsll, vbll); |
817 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
818 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
819 | // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous |
820 | |
821 | res_i = vec_any_gt(vull, vull); |
822 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
823 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
824 | // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous |
825 | |
826 | res_i = vec_any_gt(vull, vbll); |
827 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
828 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
829 | // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous |
830 | |
831 | res_i = vec_any_gt(vbll, vsll); |
832 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
833 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
834 | // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous |
835 | |
836 | res_i = vec_any_gt(vbll, vull); |
837 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
838 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
839 | // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous |
840 | |
841 | res_i = vec_any_gt(vbll, vbll); |
842 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
843 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
844 | // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous |
845 | |
846 | res_i = vec_any_gt(vda, vda); |
847 | // CHECK: @llvm.ppc.vsx.xvcmpgtdp.p |
848 | // CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p |
849 | |
850 | /* vec_any_le */ |
851 | res_i = vec_any_le(vsll, vsll); |
852 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
853 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
854 | // CHECK-PPC: error: call to 'vec_any_le' is ambiguous |
855 | |
856 | res_i = vec_any_le(vsll, vbll); |
857 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
858 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
859 | // CHECK-PPC: error: call to 'vec_any_le' is ambiguous |
860 | |
861 | res_i = vec_any_le(vull, vull); |
862 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
863 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
864 | // CHECK-PPC: error: call to 'vec_any_le' is ambiguous |
865 | |
866 | res_i = vec_any_le(vull, vbll); |
867 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
868 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
869 | // CHECK-PPC: error: call to 'vec_any_le' is ambiguous |
870 | |
871 | res_i = vec_any_le(vbll, vsll); |
872 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
873 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
874 | // CHECK-PPC: error: call to 'vec_any_le' is ambiguous |
875 | |
876 | res_i = vec_any_le(vbll, vull); |
877 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
878 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
879 | // CHECK-PPC: error: call to 'vec_any_le' is ambiguous |
880 | |
881 | res_i = vec_any_le(vbll, vbll); |
882 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
883 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
884 | // CHECK-PPC: error: call to 'vec_any_le' is ambiguous |
885 | |
886 | res_i = vec_any_le(vda, vda); |
887 | // CHECK: @llvm.ppc.vsx.xvcmpgedp.p |
888 | // CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p |
889 | |
890 | /* vec_any_lt */ |
891 | res_i = vec_any_lt(vsll, vsll); |
892 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
893 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
894 | // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous |
895 | |
896 | res_i = vec_any_lt(vsll, vbll); |
897 | // CHECK: @llvm.ppc.altivec.vcmpgtsd.p |
898 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p |
899 | // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous |
900 | |
901 | res_i = vec_any_lt(vull, vull); |
902 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
903 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
904 | // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous |
905 | |
906 | res_i = vec_any_lt(vull, vbll); |
907 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
908 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
909 | // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous |
910 | |
911 | res_i = vec_any_lt(vbll, vsll); |
912 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
913 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
914 | // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous |
915 | |
916 | res_i = vec_any_lt(vbll, vull); |
917 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
918 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
919 | // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous |
920 | |
921 | res_i = vec_any_lt(vbll, vbll); |
922 | // CHECK: @llvm.ppc.altivec.vcmpgtud.p |
923 | // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p |
924 | // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous |
925 | |
926 | res_i = vec_any_lt(vda, vda); |
927 | // CHECK: @llvm.ppc.vsx.xvcmpgtdp.p |
928 | // CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p |
929 | |
930 | /* vec_max */ |
931 | res_vsll = vec_max(vsll, vsll); |
932 | // CHECK: @llvm.ppc.altivec.vmaxsd |
933 | // CHECK-LE: @llvm.ppc.altivec.vmaxsd |
934 | // CHECK-PPC: error: call to 'vec_max' is ambiguous |
935 | |
936 | res_vsll = vec_max(vbll, vsll); |
937 | // CHECK: @llvm.ppc.altivec.vmaxsd |
938 | // CHECK-LE: @llvm.ppc.altivec.vmaxsd |
939 | // CHECK-PPC: error: call to 'vec_max' is ambiguous |
940 | |
941 | res_vsll = vec_max(vsll, vbll); |
942 | // CHECK: @llvm.ppc.altivec.vmaxsd |
943 | // CHECK-LE: @llvm.ppc.altivec.vmaxsd |
944 | // CHECK-PPC: error: call to 'vec_max' is ambiguous |
945 | |
946 | res_vull = vec_max(vull, vull); |
947 | // CHECK: @llvm.ppc.altivec.vmaxud |
948 | // CHECK-LE: @llvm.ppc.altivec.vmaxud |
949 | // CHECK-PPC: error: call to 'vec_max' is ambiguous |
950 | |
951 | res_vull = vec_max(vbll, vull); |
952 | // CHECK: @llvm.ppc.altivec.vmaxud |
953 | // CHECK-LE: @llvm.ppc.altivec.vmaxud |
954 | // CHECK-PPC: error: call to 'vec_max' is ambiguous |
955 | |
956 | res_vull = vec_max(vull, vbll); |
957 | // CHECK: @llvm.ppc.altivec.vmaxud |
958 | // CHECK-LE: @llvm.ppc.altivec.vmaxud |
959 | // CHECK-PPC: error: call to 'vec_max' is ambiguous |
960 | |
961 | /* vec_mergeh */ |
962 | res_vbll = vec_mergeh(vbll, vbll); |
963 | // CHECK: @llvm.ppc.altivec.vperm |
964 | // CHECK-LE: @llvm.ppc.altivec.vperm |
965 | |
966 | res_vbll = vec_mergel(vbll, vbll); |
967 | // CHECK: @llvm.ppc.altivec.vperm |
968 | // CHECK-LE: @llvm.ppc.altivec.vperm |
969 | |
970 | /* vec_min */ |
971 | res_vsll = vec_min(vsll, vsll); |
972 | // CHECK: @llvm.ppc.altivec.vminsd |
973 | // CHECK-LE: @llvm.ppc.altivec.vminsd |
974 | // CHECK-PPC: error: call to 'vec_min' is ambiguous |
975 | |
976 | res_vsll = vec_min(vbll, vsll); |
977 | // CHECK: @llvm.ppc.altivec.vminsd |
978 | // CHECK-LE: @llvm.ppc.altivec.vminsd |
979 | // CHECK-PPC: error: call to 'vec_min' is ambiguous |
980 | |
981 | res_vsll = vec_min(vsll, vbll); |
982 | // CHECK: @llvm.ppc.altivec.vminsd |
983 | // CHECK-LE: @llvm.ppc.altivec.vminsd |
984 | // CHECK-PPC: error: call to 'vec_min' is ambiguous |
985 | |
986 | res_vull = vec_min(vull, vull); |
987 | // CHECK: @llvm.ppc.altivec.vminud |
988 | // CHECK-LE: @llvm.ppc.altivec.vminud |
989 | // CHECK-PPC: error: call to 'vec_min' is ambiguous |
990 | |
991 | res_vull = vec_min(vbll, vull); |
992 | // CHECK: @llvm.ppc.altivec.vminud |
993 | // CHECK-LE: @llvm.ppc.altivec.vminud |
994 | // CHECK-PPC: error: call to 'vec_min' is ambiguous |
995 | |
996 | res_vull = vec_min(vull, vbll); |
997 | // CHECK: @llvm.ppc.altivec.vminud |
998 | // CHECK-LE: @llvm.ppc.altivec.vminud |
999 | // CHECK-PPC: error: call to 'vec_min' is ambiguous |
1000 | |
1001 | /* vec_mule */ |
1002 | res_vsll = vec_mule(vsi, vsi); |
1003 | // CHECK: @llvm.ppc.altivec.vmulesw |
1004 | // CHECK-LE: @llvm.ppc.altivec.vmulosw |
1005 | // CHECK-PPC: error: call to 'vec_mule' is ambiguous |
1006 | |
1007 | res_vull = vec_mule(vui , vui); |
1008 | // CHECK: @llvm.ppc.altivec.vmuleuw |
1009 | // CHECK-LE: @llvm.ppc.altivec.vmulouw |
1010 | // CHECK-PPC: error: call to 'vec_mule' is ambiguous |
1011 | |
1012 | /* vec_mulo */ |
1013 | res_vsll = vec_mulo(vsi, vsi); |
1014 | // CHECK: @llvm.ppc.altivec.vmulosw |
1015 | // CHECK-LE: @llvm.ppc.altivec.vmulesw |
1016 | // CHECK-PPC: error: call to 'vec_mulo' is ambiguous |
1017 | |
1018 | res_vull = vec_mulo(vui, vui); |
1019 | // CHECK: @llvm.ppc.altivec.vmulouw |
1020 | // CHECK-LE: @llvm.ppc.altivec.vmuleuw |
1021 | // CHECK-PPC: error: call to 'vec_mulo' is ambiguous |
1022 | |
1023 | /* vec_packs */ |
1024 | res_vsi = vec_packs(vsll, vsll); |
1025 | // CHECK: @llvm.ppc.altivec.vpksdss |
1026 | // CHECK-LE: @llvm.ppc.altivec.vpksdss |
1027 | // CHECK-PPC: error: call to 'vec_packs' is ambiguous |
1028 | |
1029 | res_vui = vec_packs(vull, vull); |
1030 | // CHECK: @llvm.ppc.altivec.vpkudus |
1031 | // CHECK-LE: @llvm.ppc.altivec.vpkudus |
1032 | // CHECK-PPC: error: call to 'vec_packs' is ambiguous |
1033 | |
1034 | /* vec_packsu */ |
1035 | res_vui = vec_packsu(vsll, vsll); |
1036 | // CHECK: @llvm.ppc.altivec.vpksdus |
1037 | // CHECK-LE: @llvm.ppc.altivec.vpksdus |
1038 | // CHECK-PPC: error: call to 'vec_packsu' is ambiguous |
1039 | |
1040 | res_vui = vec_packsu(vull, vull); |
1041 | // CHECK: @llvm.ppc.altivec.vpkudus |
1042 | // CHECK-LE: @llvm.ppc.altivec.vpkudus |
1043 | // CHECK-PPC: error: call to 'vec_packsu' is ambiguous |
1044 | |
1045 | /* vec_rl */ |
1046 | res_vsll = vec_rl(vsll, vull); |
1047 | // CHECK: @llvm.ppc.altivec.vrld |
1048 | // CHECK-LE: @llvm.ppc.altivec.vrld |
1049 | // CHECK-PPC: error: call to 'vec_rl' is ambiguous |
1050 | |
1051 | res_vull = vec_rl(vull, vull); |
1052 | // CHECK: @llvm.ppc.altivec.vrld |
1053 | // CHECK-LE: @llvm.ppc.altivec.vrld |
1054 | // CHECK-PPC: error: call to 'vec_rl' is ambiguous |
1055 | |
1056 | /* vec_sl */ |
1057 | res_vsll = vec_sl(vsll, vull); |
1058 | // CHECK: shl <2 x i64> |
1059 | // CHECK-LE: shl <2 x i64> |
1060 | // CHECK-PPC: error: call to 'vec_sl' is ambiguous |
1061 | |
1062 | res_vull = vec_sl(vull, vull); |
1063 | // CHECK: shl <2 x i64> |
1064 | // CHECK-LE: shl <2 x i64> |
1065 | // CHECK-PPC: error: call to 'vec_sl' is ambiguous |
1066 | |
1067 | /* vec_sr */ |
1068 | res_vsll = vec_sr(vsll, vull); |
1069 | // CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64> |
1070 | // CHECK: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]] |
1071 | // CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64> |
1072 | // CHECK-LE: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]] |
1073 | // CHECK-PPC: error: call to 'vec_sr' is ambiguous |
1074 | |
1075 | res_vull = vec_sr(vull, vull); |
1076 | // CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64> |
1077 | // CHECK: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]] |
1078 | // CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64> |
1079 | // CHECK-LE: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]] |
1080 | // CHECK-PPC: error: call to 'vec_sr' is ambiguous |
1081 | |
1082 | /* vec_sra */ |
1083 | res_vsll = vec_sra(vsll, vull); |
1084 | // CHECK: ashr <2 x i64> |
1085 | // CHECK-LE: ashr <2 x i64> |
1086 | // CHECK-PPC: error: call to 'vec_sra' is ambiguous |
1087 | |
1088 | res_vull = vec_sra(vull, vull); |
1089 | // CHECK: ashr <2 x i64> |
1090 | // CHECK-LE: ashr <2 x i64> |
1091 | // CHECK-PPC: error: call to 'vec_sra' is ambiguous |
1092 | |
1093 | /* vec_splats */ |
1094 | res_vsll = vec_splats(sll); |
1095 | // CHECK: insertelement <2 x i64> |
1096 | // CHECK-LE: insertelement <2 x i64> |
1097 | |
1098 | res_vull = vec_splats(ull); |
1099 | // CHECK: insertelement <2 x i64> |
1100 | // CHECK-LE: insertelement <2 x i64> |
1101 | |
1102 | res_vsx = vec_splats(sx); |
1103 | // CHECK: insertelement <1 x i128> |
1104 | // CHECK-LE: insertelement <1 x i128> |
1105 | |
1106 | res_vux = vec_splats(ux); |
1107 | // CHECK: insertelement <1 x i128> |
1108 | // CHECK-LE: insertelement <1 x i128> |
1109 | |
1110 | res_vd = vec_splats(d); |
1111 | // CHECK: insertelement <2 x double> |
1112 | // CHECK-LE: insertelement <2 x double> |
1113 | |
1114 | |
1115 | /* vec_unpackh */ |
1116 | res_vsll = vec_unpackh(vsi); |
1117 | // CHECK: llvm.ppc.altivec.vupkhsw |
1118 | // CHECK-LE: llvm.ppc.altivec.vupklsw |
1119 | // CHECK-PPC: error: call to 'vec_unpackh' is ambiguous |
1120 | |
1121 | res_vbll = vec_unpackh(vbi); |
1122 | // CHECK: llvm.ppc.altivec.vupkhsw |
1123 | // CHECK-LE: llvm.ppc.altivec.vupklsw |
1124 | // CHECK-PPC: error: call to 'vec_unpackh' is ambiguous |
1125 | |
1126 | /* vec_unpackl */ |
1127 | res_vsll = vec_unpackl(vsi); |
1128 | // CHECK: llvm.ppc.altivec.vupklsw |
1129 | // CHECK-LE: llvm.ppc.altivec.vupkhsw |
1130 | // CHECK-PPC: error: call to 'vec_unpackl' is ambiguous |
1131 | |
1132 | res_vbll = vec_unpackl(vbi); |
1133 | // CHECK: llvm.ppc.altivec.vupklsw |
1134 | // CHECK-LE: llvm.ppc.altivec.vupkhsw |
1135 | // CHECK-PPC: error: call to 'vec_unpackl' is ambiguous |
1136 | |
1137 | /* vec_vpksdss */ |
1138 | res_vsi = vec_vpksdss(vsll, vsll); |
1139 | // CHECK: llvm.ppc.altivec.vpksdss |
1140 | // CHECK-LE: llvm.ppc.altivec.vpksdss |
1141 | // CHECK-PPC: warning: implicit declaration of function 'vec_vpksdss' |
1142 | |
1143 | /* vec_vpksdus */ |
1144 | res_vui = vec_vpksdus(vsll, vsll); |
1145 | // CHECK: llvm.ppc.altivec.vpksdus |
1146 | // CHECK-LE: llvm.ppc.altivec.vpksdus |
1147 | // CHECK-PPC: warning: implicit declaration of function 'vec_vpksdus' |
1148 | |
1149 | /* vec_vpkudum */ |
1150 | res_vsi = vec_vpkudum(vsll, vsll); |
1151 | // CHECK: vperm |
1152 | // CHECK-LE: vperm |
1153 | // CHECK-PPC: warning: implicit declaration of function 'vec_vpkudum' |
1154 | |
1155 | res_vui = vec_vpkudum(vull, vull); |
1156 | // CHECK: vperm |
1157 | // CHECK-LE: vperm |
1158 | |
1159 | res_vui = vec_vpkudus(vull, vull); |
1160 | // CHECK: llvm.ppc.altivec.vpkudus |
1161 | // CHECK-LE: llvm.ppc.altivec.vpkudus |
1162 | // CHECK-PPC: warning: implicit declaration of function 'vec_vpkudus' |
1163 | |
1164 | /* vec_vupkhsw */ |
1165 | res_vsll = vec_vupkhsw(vsi); |
1166 | // CHECK: llvm.ppc.altivec.vupkhsw |
1167 | // CHECK-LE: llvm.ppc.altivec.vupklsw |
1168 | // CHECK-PPC: warning: implicit declaration of function 'vec_vupkhsw' |
1169 | |
1170 | res_vbll = vec_vupkhsw(vbi); |
1171 | // CHECK: llvm.ppc.altivec.vupkhsw |
1172 | // CHECK-LE: llvm.ppc.altivec.vupklsw |
1173 | |
1174 | /* vec_vupklsw */ |
1175 | res_vsll = vec_vupklsw(vsi); |
1176 | // CHECK: llvm.ppc.altivec.vupklsw |
1177 | // CHECK-LE: llvm.ppc.altivec.vupkhsw |
1178 | // CHECK-PPC: warning: implicit declaration of function 'vec_vupklsw' |
1179 | |
1180 | res_vbll = vec_vupklsw(vbi); |
1181 | // CHECK: llvm.ppc.altivec.vupklsw |
1182 | // CHECK-LE: llvm.ppc.altivec.vupkhsw |
1183 | |
1184 | /* vec_max */ |
1185 | res_vsll = vec_max(vsll, vsll); |
1186 | // CHECK: @llvm.ppc.altivec.vmaxsd |
1187 | // CHECK-LE: @llvm.ppc.altivec.vmaxsd |
1188 | |
1189 | res_vsll = vec_max(vbll, vsll); |
1190 | // CHECK: @llvm.ppc.altivec.vmaxsd |
1191 | // CHECK-LE: @llvm.ppc.altivec.vmaxsd |
1192 | |
1193 | res_vsll = vec_max(vsll, vbll); |
1194 | // CHECK: @llvm.ppc.altivec.vmaxsd |
1195 | // CHECK-LE: @llvm.ppc.altivec.vmaxsd |
1196 | |
1197 | res_vull = vec_max(vull, vull); |
1198 | // CHECK: @llvm.ppc.altivec.vmaxud |
1199 | // CHECK-LE: @llvm.ppc.altivec.vmaxud |
1200 | |
1201 | res_vull = vec_max(vbll, vull); |
1202 | // CHECK: @llvm.ppc.altivec.vmaxud |
1203 | // CHECK-LE: @llvm.ppc.altivec.vmaxud |
1204 | |
1205 | /* vec_min */ |
1206 | res_vsll = vec_min(vsll, vsll); |
1207 | // CHECK: @llvm.ppc.altivec.vminsd |
1208 | // CHECK-LE: @llvm.ppc.altivec.vminsd |
1209 | |
1210 | res_vsll = vec_min(vbll, vsll); |
1211 | // CHECK: @llvm.ppc.altivec.vminsd |
1212 | // CHECK-LE: @llvm.ppc.altivec.vminsd |
1213 | |
1214 | res_vsll = vec_min(vsll, vbll); |
1215 | // CHECK: @llvm.ppc.altivec.vminsd |
1216 | // CHECK-LE: @llvm.ppc.altivec.vminsd |
1217 | |
1218 | res_vull = vec_min(vull, vull); |
1219 | // CHECK: @llvm.ppc.altivec.vminud |
1220 | // CHECK-LE: @llvm.ppc.altivec.vminud |
1221 | |
1222 | res_vull = vec_min(vbll, vull); |
1223 | // CHECK: @llvm.ppc.altivec.vminud |
1224 | // CHECK-LE: @llvm.ppc.altivec.vminud |
1225 | |
1226 | /* vec_nand */ |
1227 | res_vsc = vec_nand(vsc, vsc); |
1228 | // CHECK: [[T1:%.+]] = and <16 x i8> |
1229 | // CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1230 | // CHECK-LE: [[T1:%.+]] = and <16 x i8> |
1231 | // CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1232 | // CHECK-PPC: warning: implicit declaration of function 'vec_nand' is invalid in C99 |
1233 | |
1234 | res_vsc = vec_nand(vbc, vbc); |
1235 | // CHECK: [[T1:%.+]] = and <16 x i8> |
1236 | // CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1237 | // CHECK-LE: [[T1:%.+]] = and <16 x i8> |
1238 | // CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1239 | |
1240 | res_vuc = vec_nand(vuc, vuc); |
1241 | // CHECK: [[T1:%.+]] = and <16 x i8> |
1242 | // CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1243 | // CHECK-LE: [[T1:%.+]] = and <16 x i8> |
1244 | // CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1245 | |
1246 | res_vss = vec_nand(vss, vss); |
1247 | // CHECK: [[T1:%.+]] = and <8 x i16> |
1248 | // CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1249 | // CHECK-LE: [[T1:%.+]] = and <8 x i16> |
1250 | // CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1251 | |
1252 | res_vss = vec_nand(vbs, vbs); |
1253 | // CHECK: [[T1:%.+]] = and <8 x i16> |
1254 | // CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1255 | // CHECK-LE: [[T1:%.+]] = and <8 x i16> |
1256 | // CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1257 | |
1258 | res_vus = vec_nand(vus, vus); |
1259 | // CHECK: [[T1:%.+]] = and <8 x i16> |
1260 | // CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1261 | // CHECK-LE: [[T1:%.+]] = and <8 x i16> |
1262 | // CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1263 | |
1264 | res_vsi = vec_nand(vsi, vsi); |
1265 | // CHECK: [[T1:%.+]] = and <4 x i32> |
1266 | // CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1267 | // CHECK-LE: [[T1:%.+]] = and <4 x i32> |
1268 | // CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1269 | |
1270 | res_vsi = vec_nand(vbi, vbi); |
1271 | // CHECK: [[T1:%.+]] = and <4 x i32> |
1272 | // CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1273 | // CHECK-LE: [[T1:%.+]] = and <4 x i32> |
1274 | // CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1275 | |
1276 | res_vui = vec_nand(vui, vui); |
1277 | // CHECK: [[T1:%.+]] = and <4 x i32> |
1278 | // CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1279 | // CHECK-LE: [[T1:%.+]] = and <4 x i32> |
1280 | // CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1281 | |
1282 | res_vf = vec_nand(vfa, vfa); |
1283 | // CHECK: [[T1:%.+]] = and <4 x i32> |
1284 | // CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1285 | // CHECK-LE: [[T1:%.+]] = and <4 x i32> |
1286 | // CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1287 | |
1288 | res_vsll = vec_nand(vsll, vsll); |
1289 | // CHECK: [[T1:%.+]] = and <2 x i64> |
1290 | // CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1> |
1291 | // CHECK-LE: [[T1:%.+]] = and <2 x i64> |
1292 | // CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1> |
1293 | |
1294 | res_vsll = vec_nand(vbll, vbll); |
1295 | // CHECK: [[T1:%.+]] = and <2 x i64> |
1296 | // CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1> |
1297 | // CHECK-LE: [[T1:%.+]] = and <2 x i64> |
1298 | // CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1> |
1299 | |
1300 | res_vull = vec_nand(vull, vull); |
1301 | // CHECK: [[T1:%.+]] = and <2 x i64> |
1302 | // CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1> |
1303 | // CHECK-LE: [[T1:%.+]] = and <2 x i64> |
1304 | // CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1> |
1305 | |
1306 | res_vd = vec_nand(vda, vda); |
1307 | // CHECK: [[T1:%.+]] = and <2 x i64> |
1308 | // CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1> |
1309 | // CHECK-LE: [[T1:%.+]] = and <2 x i64> |
1310 | // CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1> |
1311 | |
1312 | /* vec_orc */ |
1313 | res_vsc = vec_orc(vsc, vsc); |
1314 | // CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1315 | // CHECK: or <16 x i8> {{%.+}}, [[T1]] |
1316 | // CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1317 | // CHECK-LE: or <16 x i8> {{%.+}}, [[T1]] |
1318 | // CHECK-PPC: warning: implicit declaration of function 'vec_orc' is invalid in C99 |
1319 | |
1320 | res_vsc = vec_orc(vsc, vbc); |
1321 | // CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1322 | // CHECK: or <16 x i8> {{%.+}}, [[T1]] |
1323 | // CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1324 | // CHECK-LE: or <16 x i8> {{%.+}}, [[T1]] |
1325 | |
1326 | res_vsc = vec_orc(vbc, vsc); |
1327 | // CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1328 | // CHECK: or <16 x i8> {{%.+}}, [[T1]] |
1329 | // CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1330 | // CHECK-LE: or <16 x i8> {{%.+}}, [[T1]] |
1331 | |
1332 | res_vuc = vec_orc(vuc, vuc); |
1333 | // CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1334 | // CHECK: or <16 x i8> {{%.+}}, [[T1]] |
1335 | // CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1336 | // CHECK-LE: or <16 x i8> {{%.+}}, [[T1]] |
1337 | |
1338 | res_vuc = vec_orc(vuc, vbc); |
1339 | // CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1340 | // CHECK: or <16 x i8> {{%.+}}, [[T1]] |
1341 | // CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1342 | // CHECK-LE: or <16 x i8> {{%.+}}, [[T1]] |
1343 | |
1344 | res_vuc = vec_orc(vbc, vuc); |
1345 | // CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1346 | // CHECK: or <16 x i8> {{%.+}}, [[T1]] |
1347 | // CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1348 | // CHECK-LE: or <16 x i8> {{%.+}}, [[T1]] |
1349 | |
1350 | res_vbc = vec_orc(vbc, vbc); |
1351 | // CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1352 | // CHECK: or <16 x i8> {{%.+}}, [[T1]] |
1353 | // CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1354 | // CHECK-LE: or <16 x i8> {{%.+}}, [[T1]] |
1355 | |
1356 | res_vss = vec_orc(vss, vss); |
1357 | // CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1358 | // CHECK: or <8 x i16> {{%.+}}, [[T1]] |
1359 | // CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1360 | // CHECK-LE: or <8 x i16> {{%.+}}, [[T1]] |
1361 | |
1362 | res_vss = vec_orc(vss, vbs); |
1363 | // CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1364 | // CHECK: or <8 x i16> {{%.+}}, [[T1]] |
1365 | // CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1366 | // CHECK-LE: or <8 x i16> {{%.+}}, [[T1]] |
1367 | |
1368 | res_vss = vec_orc(vbs, vss); |
1369 | // CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1370 | // CHECK: or <8 x i16> {{%.+}}, [[T1]] |
1371 | // CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1372 | // CHECK-LE: or <8 x i16> {{%.+}}, [[T1]] |
1373 | |
1374 | res_vus = vec_orc(vus, vus); |
1375 | // CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1376 | // CHECK: or <8 x i16> {{%.+}}, [[T1]] |
1377 | // CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1378 | // CHECK-LE: or <8 x i16> {{%.+}}, [[T1]] |
1379 | |
1380 | res_vus = vec_orc(vus, vbs); |
1381 | // CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1382 | // CHECK: or <8 x i16> {{%.+}}, [[T1]] |
1383 | // CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1384 | // CHECK-LE: or <8 x i16> {{%.+}}, [[T1]] |
1385 | |
1386 | res_vus = vec_orc(vbs, vus); |
1387 | // CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1388 | // CHECK: or <8 x i16> {{%.+}}, [[T1]] |
1389 | // CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1390 | // CHECK-LE: or <8 x i16> {{%.+}}, [[T1]] |
1391 | |
1392 | res_vbs = vec_orc(vbs, vbs); |
1393 | // CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1394 | // CHECK: or <8 x i16> {{%.+}}, [[T1]] |
1395 | // CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1396 | // CHECK-LE: or <8 x i16> {{%.+}}, [[T1]] |
1397 | |
1398 | res_vsi = vec_orc(vsi, vsi); |
1399 | // CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1400 | // CHECK: or <4 x i32> {{%.+}}, [[T1]] |
1401 | // CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1402 | // CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] |
1403 | |
1404 | res_vsi = vec_orc(vsi, vbi); |
1405 | // CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1406 | // CHECK: or <4 x i32> {{%.+}}, [[T1]] |
1407 | // CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1408 | // CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] |
1409 | |
1410 | res_vsi = vec_orc(vbi, vsi); |
1411 | // CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1412 | // CHECK: or <4 x i32> {{%.+}}, [[T1]] |
1413 | // CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1414 | // CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] |
1415 | |
1416 | res_vui = vec_orc(vui, vui); |
1417 | // CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1418 | // CHECK: or <4 x i32> {{%.+}}, [[T1]] |
1419 | // CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1420 | // CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] |
1421 | |
1422 | res_vui = vec_orc(vui, vbi); |
1423 | // CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1424 | // CHECK: or <4 x i32> {{%.+}}, [[T1]] |
1425 | // CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1426 | // CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] |
1427 | |
1428 | res_vui = vec_orc(vbi, vui); |
1429 | // CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1430 | // CHECK: or <4 x i32> {{%.+}}, [[T1]] |
1431 | // CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1432 | // CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] |
1433 | |
1434 | res_vbi = vec_orc(vbi, vbi); |
1435 | // CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1436 | // CHECK: or <4 x i32> {{%.+}}, [[T1]] |
1437 | // CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1438 | // CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] |
1439 | |
1440 | res_vf = vec_orc(vbi, vfa); |
1441 | // CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1442 | // CHECK: or <4 x i32> {{%.+}}, [[T1]] |
1443 | // CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1444 | // CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] |
1445 | |
1446 | res_vf = vec_orc(vfa, vbi); |
1447 | // CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1448 | // CHECK: or <4 x i32> {{%.+}}, [[T1]] |
1449 | // CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1450 | // CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] |
1451 | |
1452 | res_vsll = vec_orc(vsll, vsll); |
1453 | // CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1454 | // CHECK: or <2 x i64> {{%.+}}, [[T1]] |
1455 | // CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1456 | // CHECK-LE: or <2 x i64> {{%.+}}, [[T1]] |
1457 | |
1458 | res_vsll = vec_orc(vsll, vbll); |
1459 | // CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1460 | // CHECK: or <2 x i64> {{%.+}}, [[T1]] |
1461 | // CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1462 | // CHECK-LE: or <2 x i64> {{%.+}}, [[T1]] |
1463 | |
1464 | res_vsll = vec_orc(vbll, vsll); |
1465 | // CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1466 | // CHECK: or <2 x i64> {{%.+}}, [[T1]] |
1467 | // CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1468 | // CHECK-LE: or <2 x i64> {{%.+}}, [[T1]] |
1469 | |
1470 | res_vull = vec_orc(vull, vull); |
1471 | // CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1472 | // CHECK: or <2 x i64> {{%.+}}, [[T1]] |
1473 | // CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1474 | // CHECK-LE: or <2 x i64> {{%.+}}, [[T1]] |
1475 | |
1476 | res_vull = vec_orc(vull, vbll); |
1477 | // CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1478 | // CHECK: or <2 x i64> {{%.+}}, [[T1]] |
1479 | // CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1480 | // CHECK-LE: or <2 x i64> {{%.+}}, [[T1]] |
1481 | |
1482 | res_vull = vec_orc(vbll, vull); |
1483 | // CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1484 | // CHECK: or <2 x i64> {{%.+}}, [[T1]] |
1485 | // CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1486 | // CHECK-LE: or <2 x i64> {{%.+}}, [[T1]] |
1487 | |
1488 | res_vbll = vec_orc(vbll, vbll); |
1489 | // CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1490 | // CHECK: or <2 x i64> {{%.+}}, [[T1]] |
1491 | // CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1492 | // CHECK-LE: or <2 x i64> {{%.+}}, [[T1]] |
1493 | |
1494 | res_vd = vec_orc(vbll, vda); |
1495 | // CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1496 | // CHECK: or <2 x i64> {{%.+}}, [[T1]] |
1497 | // CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1498 | // CHECK-LE: or <2 x i64> {{%.+}}, [[T1]] |
1499 | |
1500 | res_vd = vec_orc(vda, vbll); |
1501 | // CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1502 | // CHECK: or <2 x i64> {{%.+}}, [[T1]] |
1503 | // CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> |
1504 | // CHECK-LE: or <2 x i64> {{%.+}}, [[T1]] |
1505 | |
1506 | /* vec_sub */ |
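// Note: the 64-bit vec_sub overloads lower to plain IR 'sub' (or 'fsub' for
// vector double), and the __int128 overloads operate on a single <1 x i128>
// lane, so no target intrinsic is expected here.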
1507 | res_vsll = vec_sub(vsll, vsll); |
1508 | // CHECK: sub <2 x i64> |
1509 | // CHECK-LE: sub <2 x i64> |
1510 | |
1511 | res_vull = vec_sub(vull, vull); |
1512 | // CHECK: sub <2 x i64> |
1513 | // CHECK-LE: sub <2 x i64> |
1514 | |
1515 | res_vd = vec_sub(vda, vda); |
1516 | // CHECK: fsub <2 x double> |
1517 | // CHECK-LE: fsub <2 x double> |
1518 | |
1519 | res_vsx = vec_sub(vsx, vsx); |
1520 | // CHECK: sub <1 x i128> |
1521 | // CHECK-LE: sub <1 x i128> |
1522 | |
1523 | res_vux = vec_sub(vux, vux); |
1524 | // CHECK: sub <1 x i128> |
1525 | // CHECK-LE: sub <1 x i128> |
1526 | |
1527 | /* vec_vbpermq */ |
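// Note: both signednesses of vec_vbpermq are expected to map straight to the
// llvm.ppc.altivec.vbpermq intrinsic (vbpermq, Vector Bit Permute Quadword).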
1528 | res_vsll = vec_vbpermq(vsc, vsc); |
1529 | // CHECK: llvm.ppc.altivec.vbpermq |
1530 | // CHECK-LE: llvm.ppc.altivec.vbpermq |
1531 | |
1532 | res_vull = vec_vbpermq(vuc, vuc); |
1533 | // CHECK: llvm.ppc.altivec.vbpermq |
1534 | // CHECK-LE: llvm.ppc.altivec.vbpermq |
1535 | // CHECK-PPC: warning: implicit declaration of function 'vec_vbpermq' |
1536 | |
1537 | /* vec_vgbbd */ |
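// Note: vec_vgbbd and the vec_gb spelling below are both expected to map to
// the llvm.ppc.altivec.vgbbd intrinsic (vgbbd, Vector Gather Bits by Bytes by
// Doubleword).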
1538 | res_vsc = vec_vgbbd(vsc); |
1539 | // CHECK: llvm.ppc.altivec.vgbbd |
1540 | // CHECK-LE: llvm.ppc.altivec.vgbbd |
1541 | |
1542 | res_vuc = vec_vgbbd(vuc); |
1543 | // CHECK: llvm.ppc.altivec.vgbbd |
1544 | // CHECK-LE: llvm.ppc.altivec.vgbbd |
1545 | // CHECK-PPC: warning: implicit declaration of function 'vec_vgbbd' |
1546 | |
1547 | res_vuc = vec_gb(vuc); |
1548 | // CHECK: llvm.ppc.altivec.vgbbd |
1549 | // CHECK-LE: llvm.ppc.altivec.vgbbd |
1550 | // CHECK-PPC: warning: implicit declaration of function 'vec_gb' |
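// vec_bperm below is expected to select the same vbpermq intrinsic as the
// vec_vbpermq calls above.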
1551 | |
1552 | res_vull = vec_bperm(vux, vux); |
1553 | // CHECK: llvm.ppc.altivec.vbpermq |
1554 | // CHECK-LE: llvm.ppc.altivec.vbpermq |
1555 | // CHECK-PPC: warning: implicit declaration of function 'vec_bperm' |
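// vec_neg is expected to lower to a subtraction from zero, hence the
// 'sub <2 x i64> zeroinitializer' pattern below.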
1556 | |
1557 | res_vsll = vec_neg(vsll); |
1558 | // CHECK: sub <2 x i64> zeroinitializer, {{%[0-9]+}} |
1559 | // CHECK-LE: sub <2 x i64> zeroinitializer, {{%[0-9]+}} |
// CHECK-PPC: error: call to 'vec_neg' is ambiguous
1561 | |
1562 | |
1563 | } |
1564 | |
1565 | |
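// The vec_addec/vec_subec tests below match the element-by-element expansion
// that altivec.h appears to use at -O0 for the 32-bit "add extended with
// carry-out" operation: each lane is widened to i64, the two addends and the
// low bit of the carry vector are summed, and bit 32 of the result is kept.
// The helper below is illustrative only (unused, so it emits no IR and is not
// referenced by any CHECK line); the name addec_ref_elt is made up here.
static inline unsigned int addec_ref_elt(unsigned int a, unsigned int b,
                                         unsigned int c) {
  // carry-out of a + b + (c & 1), i.e. one lane of the expected vec_addec result
  unsigned long long sum = (unsigned long long)a + (unsigned long long)b
                           + (c & 1u);
  return (unsigned int)((sum >> 32) & 1);
}
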
1566 | vector signed int test_vec_addec_signed (vector signed int a, vector signed int b, vector signed int c) { |
1567 | return vec_addec(a, b, c); |
1568 | // CHECK-LABEL: @test_vec_addec_signed |
1569 | // CHECK: icmp slt i32 {{%[0-9]+}}, 4 |
1570 | // CHECK: extractelement |
1571 | // CHECK: extractelement |
1572 | // CHECK: extractelement |
1573 | // CHECK: and i32 {{%[0-9]+}}, 1 |
1574 | // CHECK: zext |
1575 | // CHECK: zext |
1576 | // CHECK: zext |
1577 | // CHECK: add i64 |
1578 | // CHECK: add i64 |
1579 | // CHECK: lshr i64 |
1580 | // CHECK: and i64 |
1581 | // CHECK: trunc i64 {{%[0-9]+}} to i32 |
1582 | // CHECK: zext i32 |
1583 | // CHECK: trunc i64 {{%[0-9]+}} to i32 |
1584 | // CHECK: sext i32 |
1585 | // CHECK: add nsw i32 |
1586 | // CHECK: br label |
1587 | // CHECK: ret <4 x i32> |
1588 | |
1589 | } |
1590 | |
1591 | |
1592 | vector unsigned int test_vec_addec_unsigned (vector unsigned int a, vector unsigned int b, vector unsigned int c) { |
1593 | return vec_addec(a, b, c); |
1594 | |
1595 | // CHECK-LABEL: @test_vec_addec_unsigned |
1596 | // CHECK: icmp slt i32 {{%[0-9]+}}, 4 |
1597 | // CHECK: extractelement |
1598 | // CHECK: and i32 |
1599 | // CHECK: extractelement |
1600 | // CHECK: zext i32 |
1601 | // CHECK: extractelement |
1602 | // CHECK: zext i32 |
1603 | // CHECK: zext i32 |
1604 | // CHECK: add i64 |
1605 | // CHECK: lshr i64 |
1606 | // CHECK: and i64 |
1607 | // CHECK: trunc i64 {{%[0-9]+}} to i32 |
1608 | // CHECK: zext i32 |
1609 | // CHECK: trunc i64 {{%[0-9]+}} to i32 |
1610 | // CHECK: sext i32 |
1611 | // CHECK: add nsw i32 |
1612 | // CHECK: br label |
1613 | // CHECK: ret <4 x i32> |
1614 | } |
1615 | |
1616 | vector signed int test_vec_subec_signed (vector signed int a, vector signed int b, vector signed int c) { |
1617 | return vec_subec(a, b, c); |
1618 | // CHECK-LABEL: @test_vec_subec_signed |
1619 | // CHECK: xor <4 x i32> {{%[0-9]+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1620 | // CHECK: ret <4 x i32> |
1621 | } |
1622 | |
1623 | vector unsigned int test_vec_subec_unsigned (vector unsigned int a, vector unsigned int b, vector unsigned int c) { |
1624 | return vec_subec(a, b, c); |
1625 | |
1626 | // CHECK-LABEL: @test_vec_subec_unsigned |
1627 | // CHECK: xor <4 x i32> {{%[0-9]+}}, <i32 -1, i32 -1, i32 -1, i32 -1> |
1628 | // CHECK: ret <4 x i32> |
1629 | } |
1630 | |