1 | // RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector -emit-llvm -o - -W -Wall -Werror %s | opt -S -mem2reg | FileCheck %s |
2 | |
3 | volatile vector signed char sc, sc2; |
4 | volatile vector unsigned char uc, uc2; |
5 | volatile vector bool char bc, bc2; |
6 | |
7 | volatile vector signed short ss, ss2; |
8 | volatile vector unsigned short us, us2; |
9 | volatile vector bool short bs, bs2; |
10 | |
11 | volatile vector signed int si, si2; |
12 | volatile vector unsigned int ui, ui2; |
13 | volatile vector bool int bi, bi2; |
14 | |
15 | volatile vector signed long long sl, sl2; |
16 | volatile vector unsigned long long ul, ul2; |
17 | volatile vector bool long long bl, bl2; |
18 | |
19 | volatile vector double fd, fd2; |
20 | |
21 | volatile int cnt; |
22 | |
23 | // CHECK-LABEL: define void @test_assign() #0 { |
24 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
25 | // CHECK: store volatile <16 x i8> [[TMP0]], <16 x i8>* @sc, align 8 |
26 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
27 | // CHECK: store volatile <16 x i8> [[TMP1]], <16 x i8>* @uc, align 8 |
28 | // CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
29 | // CHECK: store volatile <8 x i16> [[TMP2]], <8 x i16>* @ss, align 8 |
30 | // CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
31 | // CHECK: store volatile <8 x i16> [[TMP3]], <8 x i16>* @us, align 8 |
32 | // CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
33 | // CHECK: store volatile <4 x i32> [[TMP4]], <4 x i32>* @si, align 8 |
34 | // CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
35 | // CHECK: store volatile <4 x i32> [[TMP5]], <4 x i32>* @ui, align 8 |
36 | // CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
37 | // CHECK: store volatile <2 x i64> [[TMP6]], <2 x i64>* @sl, align 8 |
38 | // CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
39 | // CHECK: store volatile <2 x i64> [[TMP7]], <2 x i64>* @ul, align 8 |
40 | // CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
41 | // CHECK: store volatile <2 x double> [[TMP8]], <2 x double>* @fd, align 8 |
42 | // CHECK: ret void |
43 | void test_assign(void) { |
44 | |
45 | sc = sc2; |
46 | uc = uc2; |
47 | |
48 | ss = ss2; |
49 | us = us2; |
50 | |
51 | si = si2; |
52 | ui = ui2; |
53 | |
54 | sl = sl2; |
55 | ul = ul2; |
56 | |
57 | fd = fd2; |
58 | } |
59 | |
60 | // CHECK-LABEL: define void @test_pos() #0 { |
61 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
62 | // CHECK: store volatile <16 x i8> [[TMP0]], <16 x i8>* @sc, align 8 |
63 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
64 | // CHECK: store volatile <16 x i8> [[TMP1]], <16 x i8>* @uc, align 8 |
65 | // CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
66 | // CHECK: store volatile <8 x i16> [[TMP2]], <8 x i16>* @ss, align 8 |
67 | // CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
68 | // CHECK: store volatile <8 x i16> [[TMP3]], <8 x i16>* @us, align 8 |
69 | // CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
70 | // CHECK: store volatile <4 x i32> [[TMP4]], <4 x i32>* @si, align 8 |
71 | // CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
72 | // CHECK: store volatile <4 x i32> [[TMP5]], <4 x i32>* @ui, align 8 |
73 | // CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
74 | // CHECK: store volatile <2 x i64> [[TMP6]], <2 x i64>* @sl, align 8 |
75 | // CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
76 | // CHECK: store volatile <2 x i64> [[TMP7]], <2 x i64>* @ul, align 8 |
77 | // CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
78 | // CHECK: store volatile <2 x double> [[TMP8]], <2 x double>* @fd, align 8 |
79 | // CHECK: ret void |
80 | void test_pos(void) { |
81 | |
82 | sc = +sc2; |
83 | uc = +uc2; |
84 | |
85 | ss = +ss2; |
86 | us = +us2; |
87 | |
88 | si = +si2; |
89 | ui = +ui2; |
90 | |
91 | sl = +sl2; |
92 | ul = +ul2; |
93 | |
94 | fd = +fd2; |
95 | } |
96 | |
97 | // CHECK-LABEL: define void @test_neg() #0 { |
98 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
99 | // CHECK: [[SUB:%.*]] = sub <16 x i8> zeroinitializer, [[TMP0]] |
100 | // CHECK: store volatile <16 x i8> [[SUB]], <16 x i8>* @sc, align 8 |
101 | // CHECK: [[TMP1:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
102 | // CHECK: [[SUB1:%.*]] = sub <8 x i16> zeroinitializer, [[TMP1]] |
103 | // CHECK: store volatile <8 x i16> [[SUB1]], <8 x i16>* @ss, align 8 |
104 | // CHECK: [[TMP2:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
105 | // CHECK: [[SUB2:%.*]] = sub <4 x i32> zeroinitializer, [[TMP2]] |
106 | // CHECK: store volatile <4 x i32> [[SUB2]], <4 x i32>* @si, align 8 |
107 | // CHECK: [[TMP3:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
108 | // CHECK: [[SUB3:%.*]] = sub <2 x i64> zeroinitializer, [[TMP3]] |
109 | // CHECK: store volatile <2 x i64> [[SUB3]], <2 x i64>* @sl, align 8 |
110 | // CHECK: [[TMP4:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
111 | // CHECK: [[SUB4:%.*]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[TMP4]] |
112 | // CHECK: store volatile <2 x double> [[SUB4]], <2 x double>* @fd, align 8 |
113 | // CHECK: ret void |
114 | void test_neg(void) { |
115 | |
116 | sc = -sc2; |
117 | ss = -ss2; |
118 | si = -si2; |
119 | sl = -sl2; |
120 | fd = -fd2; |
121 | } |
122 | |
123 | // CHECK-LABEL: define void @test_preinc() #0 { |
124 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
125 | // CHECK: [[INC:%.*]] = add <16 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> |
126 | // CHECK: store volatile <16 x i8> [[INC]], <16 x i8>* @sc2, align 8 |
127 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
128 | // CHECK: [[INC1:%.*]] = add <16 x i8> [[TMP1]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> |
129 | // CHECK: store volatile <16 x i8> [[INC1]], <16 x i8>* @uc2, align 8 |
130 | // CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
131 | // CHECK: [[INC2:%.*]] = add <8 x i16> [[TMP2]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
132 | // CHECK: store volatile <8 x i16> [[INC2]], <8 x i16>* @ss2, align 8 |
133 | // CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
134 | // CHECK: [[INC3:%.*]] = add <8 x i16> [[TMP3]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
135 | // CHECK: store volatile <8 x i16> [[INC3]], <8 x i16>* @us2, align 8 |
136 | // CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
137 | // CHECK: [[INC4:%.*]] = add <4 x i32> [[TMP4]], <i32 1, i32 1, i32 1, i32 1> |
138 | // CHECK: store volatile <4 x i32> [[INC4]], <4 x i32>* @si2, align 8 |
139 | // CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
140 | // CHECK: [[INC5:%.*]] = add <4 x i32> [[TMP5]], <i32 1, i32 1, i32 1, i32 1> |
141 | // CHECK: store volatile <4 x i32> [[INC5]], <4 x i32>* @ui2, align 8 |
142 | // CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
143 | // CHECK: [[INC6:%.*]] = add <2 x i64> [[TMP6]], <i64 1, i64 1> |
144 | // CHECK: store volatile <2 x i64> [[INC6]], <2 x i64>* @sl2, align 8 |
145 | // CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
146 | // CHECK: [[INC7:%.*]] = add <2 x i64> [[TMP7]], <i64 1, i64 1> |
147 | // CHECK: store volatile <2 x i64> [[INC7]], <2 x i64>* @ul2, align 8 |
148 | // CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
149 | // CHECK: [[INC8:%.*]] = fadd <2 x double> [[TMP8]], <double 1.000000e+00, double 1.000000e+00> |
150 | // CHECK: store volatile <2 x double> [[INC8]], <2 x double>* @fd2, align 8 |
151 | // CHECK: ret void |
152 | void test_preinc(void) { |
153 | |
154 | ++sc2; |
155 | ++uc2; |
156 | |
157 | ++ss2; |
158 | ++us2; |
159 | |
160 | ++si2; |
161 | ++ui2; |
162 | |
163 | ++sl2; |
164 | ++ul2; |
165 | |
166 | ++fd2; |
167 | } |
168 | |
169 | // CHECK-LABEL: define void @test_postinc() #0 { |
170 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
171 | // CHECK: [[INC:%.*]] = add <16 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> |
172 | // CHECK: store volatile <16 x i8> [[INC]], <16 x i8>* @sc2, align 8 |
173 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
174 | // CHECK: [[INC1:%.*]] = add <16 x i8> [[TMP1]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> |
175 | // CHECK: store volatile <16 x i8> [[INC1]], <16 x i8>* @uc2, align 8 |
176 | // CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
177 | // CHECK: [[INC2:%.*]] = add <8 x i16> [[TMP2]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
178 | // CHECK: store volatile <8 x i16> [[INC2]], <8 x i16>* @ss2, align 8 |
179 | // CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
180 | // CHECK: [[INC3:%.*]] = add <8 x i16> [[TMP3]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
181 | // CHECK: store volatile <8 x i16> [[INC3]], <8 x i16>* @us2, align 8 |
182 | // CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
183 | // CHECK: [[INC4:%.*]] = add <4 x i32> [[TMP4]], <i32 1, i32 1, i32 1, i32 1> |
184 | // CHECK: store volatile <4 x i32> [[INC4]], <4 x i32>* @si2, align 8 |
185 | // CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
186 | // CHECK: [[INC5:%.*]] = add <4 x i32> [[TMP5]], <i32 1, i32 1, i32 1, i32 1> |
187 | // CHECK: store volatile <4 x i32> [[INC5]], <4 x i32>* @ui2, align 8 |
188 | // CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
189 | // CHECK: [[INC6:%.*]] = add <2 x i64> [[TMP6]], <i64 1, i64 1> |
190 | // CHECK: store volatile <2 x i64> [[INC6]], <2 x i64>* @sl2, align 8 |
191 | // CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
192 | // CHECK: [[INC7:%.*]] = add <2 x i64> [[TMP7]], <i64 1, i64 1> |
193 | // CHECK: store volatile <2 x i64> [[INC7]], <2 x i64>* @ul2, align 8 |
194 | // CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
195 | // CHECK: [[INC8:%.*]] = fadd <2 x double> [[TMP8]], <double 1.000000e+00, double 1.000000e+00> |
196 | // CHECK: store volatile <2 x double> [[INC8]], <2 x double>* @fd2, align 8 |
197 | // CHECK: ret void |
198 | void test_postinc(void) { |
199 | |
200 | sc2++; |
201 | uc2++; |
202 | |
203 | ss2++; |
204 | us2++; |
205 | |
206 | si2++; |
207 | ui2++; |
208 | |
209 | sl2++; |
210 | ul2++; |
211 | |
212 | fd2++; |
213 | } |
214 | |
215 | // CHECK-LABEL: define void @test_predec() #0 { |
216 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
217 | // CHECK: [[DEC:%.*]] = add <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
218 | // CHECK: store volatile <16 x i8> [[DEC]], <16 x i8>* @sc2, align 8 |
219 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
220 | // CHECK: [[DEC1:%.*]] = add <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
221 | // CHECK: store volatile <16 x i8> [[DEC1]], <16 x i8>* @uc2, align 8 |
222 | // CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
223 | // CHECK: [[DEC2:%.*]] = add <8 x i16> [[TMP2]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
224 | // CHECK: store volatile <8 x i16> [[DEC2]], <8 x i16>* @ss2, align 8 |
225 | // CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
226 | // CHECK: [[DEC3:%.*]] = add <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
227 | // CHECK: store volatile <8 x i16> [[DEC3]], <8 x i16>* @us2, align 8 |
228 | // CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
229 | // CHECK: [[DEC4:%.*]] = add <4 x i32> [[TMP4]], <i32 -1, i32 -1, i32 -1, i32 -1> |
230 | // CHECK: store volatile <4 x i32> [[DEC4]], <4 x i32>* @si2, align 8 |
231 | // CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
232 | // CHECK: [[DEC5:%.*]] = add <4 x i32> [[TMP5]], <i32 -1, i32 -1, i32 -1, i32 -1> |
233 | // CHECK: store volatile <4 x i32> [[DEC5]], <4 x i32>* @ui2, align 8 |
234 | // CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
235 | // CHECK: [[DEC6:%.*]] = add <2 x i64> [[TMP6]], <i64 -1, i64 -1> |
236 | // CHECK: store volatile <2 x i64> [[DEC6]], <2 x i64>* @sl2, align 8 |
237 | // CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
238 | // CHECK: [[DEC7:%.*]] = add <2 x i64> [[TMP7]], <i64 -1, i64 -1> |
239 | // CHECK: store volatile <2 x i64> [[DEC7]], <2 x i64>* @ul2, align 8 |
240 | // CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
241 | // CHECK: [[DEC8:%.*]] = fadd <2 x double> [[TMP8]], <double -1.000000e+00, double -1.000000e+00> |
242 | // CHECK: store volatile <2 x double> [[DEC8]], <2 x double>* @fd2, align 8 |
243 | // CHECK: ret void |
244 | void test_predec(void) { |
245 | |
246 | --sc2; |
247 | --uc2; |
248 | |
249 | --ss2; |
250 | --us2; |
251 | |
252 | --si2; |
253 | --ui2; |
254 | |
255 | --sl2; |
256 | --ul2; |
257 | |
258 | --fd2; |
259 | } |
260 | |
261 | // CHECK-LABEL: define void @test_postdec() #0 { |
262 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
263 | // CHECK: [[DEC:%.*]] = add <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
264 | // CHECK: store volatile <16 x i8> [[DEC]], <16 x i8>* @sc2, align 8 |
265 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
266 | // CHECK: [[DEC1:%.*]] = add <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
267 | // CHECK: store volatile <16 x i8> [[DEC1]], <16 x i8>* @uc2, align 8 |
268 | // CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
269 | // CHECK: [[DEC2:%.*]] = add <8 x i16> [[TMP2]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
270 | // CHECK: store volatile <8 x i16> [[DEC2]], <8 x i16>* @ss2, align 8 |
271 | // CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
272 | // CHECK: [[DEC3:%.*]] = add <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
273 | // CHECK: store volatile <8 x i16> [[DEC3]], <8 x i16>* @us2, align 8 |
274 | // CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
275 | // CHECK: [[DEC4:%.*]] = add <4 x i32> [[TMP4]], <i32 -1, i32 -1, i32 -1, i32 -1> |
276 | // CHECK: store volatile <4 x i32> [[DEC4]], <4 x i32>* @si2, align 8 |
277 | // CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
278 | // CHECK: [[DEC5:%.*]] = add <4 x i32> [[TMP5]], <i32 -1, i32 -1, i32 -1, i32 -1> |
279 | // CHECK: store volatile <4 x i32> [[DEC5]], <4 x i32>* @ui2, align 8 |
280 | // CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
281 | // CHECK: [[DEC6:%.*]] = add <2 x i64> [[TMP6]], <i64 -1, i64 -1> |
282 | // CHECK: store volatile <2 x i64> [[DEC6]], <2 x i64>* @sl2, align 8 |
283 | // CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
284 | // CHECK: [[DEC7:%.*]] = add <2 x i64> [[TMP7]], <i64 -1, i64 -1> |
285 | // CHECK: store volatile <2 x i64> [[DEC7]], <2 x i64>* @ul2, align 8 |
286 | // CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
287 | // CHECK: [[DEC8:%.*]] = fadd <2 x double> [[TMP8]], <double -1.000000e+00, double -1.000000e+00> |
288 | // CHECK: store volatile <2 x double> [[DEC8]], <2 x double>* @fd2, align 8 |
289 | // CHECK: ret void |
290 | void test_postdec(void) { |
291 | |
292 | sc2--; |
293 | uc2--; |
294 | |
295 | ss2--; |
296 | us2--; |
297 | |
298 | si2--; |
299 | ui2--; |
300 | |
301 | sl2--; |
302 | ul2--; |
303 | |
304 | fd2--; |
305 | } |
306 | |
307 | // CHECK-LABEL: define void @test_add() #0 { |
308 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
309 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
310 | // CHECK: [[ADD:%.*]] = add <16 x i8> [[TMP0]], [[TMP1]] |
311 | // CHECK: store volatile <16 x i8> [[ADD]], <16 x i8>* @sc, align 8 |
312 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
313 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
314 | // CHECK: [[ADD1:%.*]] = add <16 x i8> [[TMP2]], [[TMP3]] |
315 | // CHECK: store volatile <16 x i8> [[ADD1]], <16 x i8>* @sc, align 8 |
316 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
317 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
318 | // CHECK: [[ADD2:%.*]] = add <16 x i8> [[TMP4]], [[TMP5]] |
319 | // CHECK: store volatile <16 x i8> [[ADD2]], <16 x i8>* @sc, align 8 |
320 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
321 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
322 | // CHECK: [[ADD3:%.*]] = add <16 x i8> [[TMP6]], [[TMP7]] |
323 | // CHECK: store volatile <16 x i8> [[ADD3]], <16 x i8>* @uc, align 8 |
324 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
325 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
326 | // CHECK: [[ADD4:%.*]] = add <16 x i8> [[TMP8]], [[TMP9]] |
327 | // CHECK: store volatile <16 x i8> [[ADD4]], <16 x i8>* @uc, align 8 |
328 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
329 | // CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
330 | // CHECK: [[ADD5:%.*]] = add <16 x i8> [[TMP10]], [[TMP11]] |
331 | // CHECK: store volatile <16 x i8> [[ADD5]], <16 x i8>* @uc, align 8 |
332 | // CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
333 | // CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
334 | // CHECK: [[ADD6:%.*]] = add <8 x i16> [[TMP12]], [[TMP13]] |
335 | // CHECK: store volatile <8 x i16> [[ADD6]], <8 x i16>* @ss, align 8 |
336 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
337 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
338 | // CHECK: [[ADD7:%.*]] = add <8 x i16> [[TMP14]], [[TMP15]] |
339 | // CHECK: store volatile <8 x i16> [[ADD7]], <8 x i16>* @ss, align 8 |
340 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
341 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
342 | // CHECK: [[ADD8:%.*]] = add <8 x i16> [[TMP16]], [[TMP17]] |
343 | // CHECK: store volatile <8 x i16> [[ADD8]], <8 x i16>* @ss, align 8 |
344 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
345 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
346 | // CHECK: [[ADD9:%.*]] = add <8 x i16> [[TMP18]], [[TMP19]] |
347 | // CHECK: store volatile <8 x i16> [[ADD9]], <8 x i16>* @us, align 8 |
348 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
349 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
350 | // CHECK: [[ADD10:%.*]] = add <8 x i16> [[TMP20]], [[TMP21]] |
351 | // CHECK: store volatile <8 x i16> [[ADD10]], <8 x i16>* @us, align 8 |
352 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
353 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
354 | // CHECK: [[ADD11:%.*]] = add <8 x i16> [[TMP22]], [[TMP23]] |
355 | // CHECK: store volatile <8 x i16> [[ADD11]], <8 x i16>* @us, align 8 |
356 | // CHECK: [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
357 | // CHECK: [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
358 | // CHECK: [[ADD12:%.*]] = add <4 x i32> [[TMP24]], [[TMP25]] |
359 | // CHECK: store volatile <4 x i32> [[ADD12]], <4 x i32>* @si, align 8 |
360 | // CHECK: [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
361 | // CHECK: [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
362 | // CHECK: [[ADD13:%.*]] = add <4 x i32> [[TMP26]], [[TMP27]] |
363 | // CHECK: store volatile <4 x i32> [[ADD13]], <4 x i32>* @si, align 8 |
364 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
365 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
366 | // CHECK: [[ADD14:%.*]] = add <4 x i32> [[TMP28]], [[TMP29]] |
367 | // CHECK: store volatile <4 x i32> [[ADD14]], <4 x i32>* @si, align 8 |
368 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
369 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
370 | // CHECK: [[ADD15:%.*]] = add <4 x i32> [[TMP30]], [[TMP31]] |
371 | // CHECK: store volatile <4 x i32> [[ADD15]], <4 x i32>* @ui, align 8 |
372 | // CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
373 | // CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
374 | // CHECK: [[ADD16:%.*]] = add <4 x i32> [[TMP32]], [[TMP33]] |
375 | // CHECK: store volatile <4 x i32> [[ADD16]], <4 x i32>* @ui, align 8 |
376 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
377 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
378 | // CHECK: [[ADD17:%.*]] = add <4 x i32> [[TMP34]], [[TMP35]] |
379 | // CHECK: store volatile <4 x i32> [[ADD17]], <4 x i32>* @ui, align 8 |
380 | // CHECK: [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
381 | // CHECK: [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
382 | // CHECK: [[ADD18:%.*]] = add <2 x i64> [[TMP36]], [[TMP37]] |
383 | // CHECK: store volatile <2 x i64> [[ADD18]], <2 x i64>* @sl, align 8 |
384 | // CHECK: [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
385 | // CHECK: [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
386 | // CHECK: [[ADD19:%.*]] = add <2 x i64> [[TMP38]], [[TMP39]] |
387 | // CHECK: store volatile <2 x i64> [[ADD19]], <2 x i64>* @sl, align 8 |
388 | // CHECK: [[TMP40:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
389 | // CHECK: [[TMP41:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
390 | // CHECK: [[ADD20:%.*]] = add <2 x i64> [[TMP40]], [[TMP41]] |
391 | // CHECK: store volatile <2 x i64> [[ADD20]], <2 x i64>* @sl, align 8 |
392 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
393 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
394 | // CHECK: [[ADD21:%.*]] = add <2 x i64> [[TMP42]], [[TMP43]] |
395 | // CHECK: store volatile <2 x i64> [[ADD21]], <2 x i64>* @ul, align 8 |
396 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
397 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
398 | // CHECK: [[ADD22:%.*]] = add <2 x i64> [[TMP44]], [[TMP45]] |
399 | // CHECK: store volatile <2 x i64> [[ADD22]], <2 x i64>* @ul, align 8 |
400 | // CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
401 | // CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
402 | // CHECK: [[ADD23:%.*]] = add <2 x i64> [[TMP46]], [[TMP47]] |
403 | // CHECK: store volatile <2 x i64> [[ADD23]], <2 x i64>* @ul, align 8 |
404 | // CHECK: [[TMP48:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
405 | // CHECK: [[TMP49:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
406 | // CHECK: [[ADD24:%.*]] = fadd <2 x double> [[TMP48]], [[TMP49]] |
407 | // CHECK: store volatile <2 x double> [[ADD24]], <2 x double>* @fd, align 8 |
408 | // CHECK: ret void |
409 | void test_add(void) { |
410 | |
411 | sc = sc + sc2; |
412 | sc = sc + bc2; |
413 | sc = bc + sc2; |
414 | uc = uc + uc2; |
415 | uc = uc + bc2; |
416 | uc = bc + uc2; |
417 | |
418 | ss = ss + ss2; |
419 | ss = ss + bs2; |
420 | ss = bs + ss2; |
421 | us = us + us2; |
422 | us = us + bs2; |
423 | us = bs + us2; |
424 | |
425 | si = si + si2; |
426 | si = si + bi2; |
427 | si = bi + si2; |
428 | ui = ui + ui2; |
429 | ui = ui + bi2; |
430 | ui = bi + ui2; |
431 | |
432 | sl = sl + sl2; |
433 | sl = sl + bl2; |
434 | sl = bl + sl2; |
435 | ul = ul + ul2; |
436 | ul = ul + bl2; |
437 | ul = bl + ul2; |
438 | |
439 | fd = fd + fd2; |
440 | } |
441 | |
442 | // CHECK-LABEL: define void @test_add_assign() #0 { |
443 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
444 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
445 | // CHECK: [[ADD:%.*]] = add <16 x i8> [[TMP1]], [[TMP0]] |
446 | // CHECK: store volatile <16 x i8> [[ADD]], <16 x i8>* @sc, align 8 |
447 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
448 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
449 | // CHECK: [[ADD1:%.*]] = add <16 x i8> [[TMP3]], [[TMP2]] |
450 | // CHECK: store volatile <16 x i8> [[ADD1]], <16 x i8>* @sc, align 8 |
451 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
452 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
453 | // CHECK: [[ADD2:%.*]] = add <16 x i8> [[TMP5]], [[TMP4]] |
454 | // CHECK: store volatile <16 x i8> [[ADD2]], <16 x i8>* @uc, align 8 |
455 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
456 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
457 | // CHECK: [[ADD3:%.*]] = add <16 x i8> [[TMP7]], [[TMP6]] |
458 | // CHECK: store volatile <16 x i8> [[ADD3]], <16 x i8>* @uc, align 8 |
459 | // CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
460 | // CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
461 | // CHECK: [[ADD4:%.*]] = add <8 x i16> [[TMP9]], [[TMP8]] |
462 | // CHECK: store volatile <8 x i16> [[ADD4]], <8 x i16>* @ss, align 8 |
463 | // CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
464 | // CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
465 | // CHECK: [[ADD5:%.*]] = add <8 x i16> [[TMP11]], [[TMP10]] |
466 | // CHECK: store volatile <8 x i16> [[ADD5]], <8 x i16>* @ss, align 8 |
467 | // CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
468 | // CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
469 | // CHECK: [[ADD6:%.*]] = add <8 x i16> [[TMP13]], [[TMP12]] |
470 | // CHECK: store volatile <8 x i16> [[ADD6]], <8 x i16>* @us, align 8 |
471 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
472 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
473 | // CHECK: [[ADD7:%.*]] = add <8 x i16> [[TMP15]], [[TMP14]] |
474 | // CHECK: store volatile <8 x i16> [[ADD7]], <8 x i16>* @us, align 8 |
475 | // CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
476 | // CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
477 | // CHECK: [[ADD8:%.*]] = add <4 x i32> [[TMP17]], [[TMP16]] |
478 | // CHECK: store volatile <4 x i32> [[ADD8]], <4 x i32>* @si, align 8 |
479 | // CHECK: [[TMP18:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
480 | // CHECK: [[TMP19:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
481 | // CHECK: [[ADD9:%.*]] = add <4 x i32> [[TMP19]], [[TMP18]] |
482 | // CHECK: store volatile <4 x i32> [[ADD9]], <4 x i32>* @si, align 8 |
483 | // CHECK: [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
484 | // CHECK: [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
485 | // CHECK: [[ADD10:%.*]] = add <4 x i32> [[TMP21]], [[TMP20]] |
486 | // CHECK: store volatile <4 x i32> [[ADD10]], <4 x i32>* @ui, align 8 |
487 | // CHECK: [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
488 | // CHECK: [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
489 | // CHECK: [[ADD11:%.*]] = add <4 x i32> [[TMP23]], [[TMP22]] |
490 | // CHECK: store volatile <4 x i32> [[ADD11]], <4 x i32>* @ui, align 8 |
491 | // CHECK: [[TMP24:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
492 | // CHECK: [[TMP25:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
493 | // CHECK: [[ADD12:%.*]] = add <2 x i64> [[TMP25]], [[TMP24]] |
494 | // CHECK: store volatile <2 x i64> [[ADD12]], <2 x i64>* @sl, align 8 |
495 | // CHECK: [[TMP26:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
496 | // CHECK: [[TMP27:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
497 | // CHECK: [[ADD13:%.*]] = add <2 x i64> [[TMP27]], [[TMP26]] |
498 | // CHECK: store volatile <2 x i64> [[ADD13]], <2 x i64>* @sl, align 8 |
499 | // CHECK: [[TMP28:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
500 | // CHECK: [[TMP29:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
501 | // CHECK: [[ADD14:%.*]] = add <2 x i64> [[TMP29]], [[TMP28]] |
502 | // CHECK: store volatile <2 x i64> [[ADD14]], <2 x i64>* @ul, align 8 |
503 | // CHECK: [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
504 | // CHECK: [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
505 | // CHECK: [[ADD15:%.*]] = add <2 x i64> [[TMP31]], [[TMP30]] |
506 | // CHECK: store volatile <2 x i64> [[ADD15]], <2 x i64>* @ul, align 8 |
507 | // CHECK: [[TMP32:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
508 | // CHECK: [[TMP33:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
509 | // CHECK: [[ADD16:%.*]] = fadd <2 x double> [[TMP33]], [[TMP32]] |
510 | // CHECK: store volatile <2 x double> [[ADD16]], <2 x double>* @fd, align 8 |
511 | // CHECK: ret void |
512 | void test_add_assign(void) { |
513 | |
514 | sc += sc2; |
515 | sc += bc2; |
516 | uc += uc2; |
517 | uc += bc2; |
518 | |
519 | ss += ss2; |
520 | ss += bs2; |
521 | us += us2; |
522 | us += bs2; |
523 | |
524 | si += si2; |
525 | si += bi2; |
526 | ui += ui2; |
527 | ui += bi2; |
528 | |
529 | sl += sl2; |
530 | sl += bl2; |
531 | ul += ul2; |
532 | ul += bl2; |
533 | |
534 | fd += fd2; |
535 | } |
536 | |
537 | // CHECK-LABEL: define void @test_sub() #0 { |
538 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
539 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
540 | // CHECK: [[SUB:%.*]] = sub <16 x i8> [[TMP0]], [[TMP1]] |
541 | // CHECK: store volatile <16 x i8> [[SUB]], <16 x i8>* @sc, align 8 |
542 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
543 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
544 | // CHECK: [[SUB1:%.*]] = sub <16 x i8> [[TMP2]], [[TMP3]] |
545 | // CHECK: store volatile <16 x i8> [[SUB1]], <16 x i8>* @sc, align 8 |
546 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
547 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
548 | // CHECK: [[SUB2:%.*]] = sub <16 x i8> [[TMP4]], [[TMP5]] |
549 | // CHECK: store volatile <16 x i8> [[SUB2]], <16 x i8>* @sc, align 8 |
550 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
551 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
552 | // CHECK: [[SUB3:%.*]] = sub <16 x i8> [[TMP6]], [[TMP7]] |
553 | // CHECK: store volatile <16 x i8> [[SUB3]], <16 x i8>* @uc, align 8 |
554 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
555 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
556 | // CHECK: [[SUB4:%.*]] = sub <16 x i8> [[TMP8]], [[TMP9]] |
557 | // CHECK: store volatile <16 x i8> [[SUB4]], <16 x i8>* @uc, align 8 |
558 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
559 | // CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
560 | // CHECK: [[SUB5:%.*]] = sub <16 x i8> [[TMP10]], [[TMP11]] |
561 | // CHECK: store volatile <16 x i8> [[SUB5]], <16 x i8>* @uc, align 8 |
562 | // CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
563 | // CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
564 | // CHECK: [[SUB6:%.*]] = sub <8 x i16> [[TMP12]], [[TMP13]] |
565 | // CHECK: store volatile <8 x i16> [[SUB6]], <8 x i16>* @ss, align 8 |
566 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
567 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
568 | // CHECK: [[SUB7:%.*]] = sub <8 x i16> [[TMP14]], [[TMP15]] |
569 | // CHECK: store volatile <8 x i16> [[SUB7]], <8 x i16>* @ss, align 8 |
570 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
571 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
572 | // CHECK: [[SUB8:%.*]] = sub <8 x i16> [[TMP16]], [[TMP17]] |
573 | // CHECK: store volatile <8 x i16> [[SUB8]], <8 x i16>* @ss, align 8 |
574 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
575 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
576 | // CHECK: [[SUB9:%.*]] = sub <8 x i16> [[TMP18]], [[TMP19]] |
577 | // CHECK: store volatile <8 x i16> [[SUB9]], <8 x i16>* @us, align 8 |
578 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
579 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
580 | // CHECK: [[SUB10:%.*]] = sub <8 x i16> [[TMP20]], [[TMP21]] |
581 | // CHECK: store volatile <8 x i16> [[SUB10]], <8 x i16>* @us, align 8 |
582 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
583 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
584 | // CHECK: [[SUB11:%.*]] = sub <8 x i16> [[TMP22]], [[TMP23]] |
585 | // CHECK: store volatile <8 x i16> [[SUB11]], <8 x i16>* @us, align 8 |
586 | // CHECK: [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
587 | // CHECK: [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
588 | // CHECK: [[SUB12:%.*]] = sub <4 x i32> [[TMP24]], [[TMP25]] |
589 | // CHECK: store volatile <4 x i32> [[SUB12]], <4 x i32>* @si, align 8 |
590 | // CHECK: [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
591 | // CHECK: [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
592 | // CHECK: [[SUB13:%.*]] = sub <4 x i32> [[TMP26]], [[TMP27]] |
593 | // CHECK: store volatile <4 x i32> [[SUB13]], <4 x i32>* @si, align 8 |
594 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
595 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
596 | // CHECK: [[SUB14:%.*]] = sub <4 x i32> [[TMP28]], [[TMP29]] |
597 | // CHECK: store volatile <4 x i32> [[SUB14]], <4 x i32>* @si, align 8 |
598 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
599 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
600 | // CHECK: [[SUB15:%.*]] = sub <4 x i32> [[TMP30]], [[TMP31]] |
601 | // CHECK: store volatile <4 x i32> [[SUB15]], <4 x i32>* @ui, align 8 |
602 | // CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
603 | // CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
604 | // CHECK: [[SUB16:%.*]] = sub <4 x i32> [[TMP32]], [[TMP33]] |
605 | // CHECK: store volatile <4 x i32> [[SUB16]], <4 x i32>* @ui, align 8 |
606 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
607 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
608 | // CHECK: [[SUB17:%.*]] = sub <4 x i32> [[TMP34]], [[TMP35]] |
609 | // CHECK: store volatile <4 x i32> [[SUB17]], <4 x i32>* @ui, align 8 |
610 | // CHECK: [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
611 | // CHECK: [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
612 | // CHECK: [[SUB18:%.*]] = sub <2 x i64> [[TMP36]], [[TMP37]] |
613 | // CHECK: store volatile <2 x i64> [[SUB18]], <2 x i64>* @sl, align 8 |
614 | // CHECK: [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
615 | // CHECK: [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
616 | // CHECK: [[SUB19:%.*]] = sub <2 x i64> [[TMP38]], [[TMP39]] |
617 | // CHECK: store volatile <2 x i64> [[SUB19]], <2 x i64>* @sl, align 8 |
618 | // CHECK: [[TMP40:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
619 | // CHECK: [[TMP41:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
620 | // CHECK: [[SUB20:%.*]] = sub <2 x i64> [[TMP40]], [[TMP41]] |
621 | // CHECK: store volatile <2 x i64> [[SUB20]], <2 x i64>* @sl, align 8 |
622 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
623 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
624 | // CHECK: [[SUB21:%.*]] = sub <2 x i64> [[TMP42]], [[TMP43]] |
625 | // CHECK: store volatile <2 x i64> [[SUB21]], <2 x i64>* @ul, align 8 |
626 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
627 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
628 | // CHECK: [[SUB22:%.*]] = sub <2 x i64> [[TMP44]], [[TMP45]] |
629 | // CHECK: store volatile <2 x i64> [[SUB22]], <2 x i64>* @ul, align 8 |
630 | // CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
631 | // CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
632 | // CHECK: [[SUB23:%.*]] = sub <2 x i64> [[TMP46]], [[TMP47]] |
633 | // CHECK: store volatile <2 x i64> [[SUB23]], <2 x i64>* @ul, align 8 |
634 | // CHECK: [[TMP48:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
635 | // CHECK: [[TMP49:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
636 | // CHECK: [[SUB24:%.*]] = fsub <2 x double> [[TMP48]], [[TMP49]] |
637 | // CHECK: store volatile <2 x double> [[SUB24]], <2 x double>* @fd, align 8 |
638 | // CHECK: ret void |
639 | void test_sub(void) { |
640 | |
641 | sc = sc - sc2; |
642 | sc = sc - bc2; |
643 | sc = bc - sc2; |
644 | uc = uc - uc2; |
645 | uc = uc - bc2; |
646 | uc = bc - uc2; |
647 | |
648 | ss = ss - ss2; |
649 | ss = ss - bs2; |
650 | ss = bs - ss2; |
651 | us = us - us2; |
652 | us = us - bs2; |
653 | us = bs - us2; |
654 | |
655 | si = si - si2; |
656 | si = si - bi2; |
657 | si = bi - si2; |
658 | ui = ui - ui2; |
659 | ui = ui - bi2; |
660 | ui = bi - ui2; |
661 | |
662 | sl = sl - sl2; |
663 | sl = sl - bl2; |
664 | sl = bl - sl2; |
665 | ul = ul - ul2; |
666 | ul = ul - bl2; |
667 | ul = bl - ul2; |
668 | |
669 | fd = fd - fd2; |
670 | } |
671 | |
672 | // CHECK-LABEL: define void @test_sub_assign() #0 { |
673 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
674 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
675 | // CHECK: [[SUB:%.*]] = sub <16 x i8> [[TMP1]], [[TMP0]] |
676 | // CHECK: store volatile <16 x i8> [[SUB]], <16 x i8>* @sc, align 8 |
677 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
678 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
679 | // CHECK: [[SUB1:%.*]] = sub <16 x i8> [[TMP3]], [[TMP2]] |
680 | // CHECK: store volatile <16 x i8> [[SUB1]], <16 x i8>* @sc, align 8 |
681 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
682 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
683 | // CHECK: [[SUB2:%.*]] = sub <16 x i8> [[TMP5]], [[TMP4]] |
684 | // CHECK: store volatile <16 x i8> [[SUB2]], <16 x i8>* @uc, align 8 |
685 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
686 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
687 | // CHECK: [[SUB3:%.*]] = sub <16 x i8> [[TMP7]], [[TMP6]] |
688 | // CHECK: store volatile <16 x i8> [[SUB3]], <16 x i8>* @uc, align 8 |
689 | // CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
690 | // CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
691 | // CHECK: [[SUB4:%.*]] = sub <8 x i16> [[TMP9]], [[TMP8]] |
692 | // CHECK: store volatile <8 x i16> [[SUB4]], <8 x i16>* @ss, align 8 |
693 | // CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
694 | // CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
695 | // CHECK: [[SUB5:%.*]] = sub <8 x i16> [[TMP11]], [[TMP10]] |
696 | // CHECK: store volatile <8 x i16> [[SUB5]], <8 x i16>* @ss, align 8 |
697 | // CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
698 | // CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
699 | // CHECK: [[SUB6:%.*]] = sub <8 x i16> [[TMP13]], [[TMP12]] |
700 | // CHECK: store volatile <8 x i16> [[SUB6]], <8 x i16>* @us, align 8 |
701 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
702 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
703 | // CHECK: [[SUB7:%.*]] = sub <8 x i16> [[TMP15]], [[TMP14]] |
704 | // CHECK: store volatile <8 x i16> [[SUB7]], <8 x i16>* @us, align 8 |
705 | // CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
706 | // CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
707 | // CHECK: [[SUB8:%.*]] = sub <4 x i32> [[TMP17]], [[TMP16]] |
708 | // CHECK: store volatile <4 x i32> [[SUB8]], <4 x i32>* @si, align 8 |
709 | // CHECK: [[TMP18:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
710 | // CHECK: [[TMP19:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
711 | // CHECK: [[SUB9:%.*]] = sub <4 x i32> [[TMP19]], [[TMP18]] |
712 | // CHECK: store volatile <4 x i32> [[SUB9]], <4 x i32>* @si, align 8 |
713 | // CHECK: [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
714 | // CHECK: [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
715 | // CHECK: [[SUB10:%.*]] = sub <4 x i32> [[TMP21]], [[TMP20]] |
716 | // CHECK: store volatile <4 x i32> [[SUB10]], <4 x i32>* @ui, align 8 |
717 | // CHECK: [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
718 | // CHECK: [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
719 | // CHECK: [[SUB11:%.*]] = sub <4 x i32> [[TMP23]], [[TMP22]] |
720 | // CHECK: store volatile <4 x i32> [[SUB11]], <4 x i32>* @ui, align 8 |
721 | // CHECK: [[TMP24:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
722 | // CHECK: [[TMP25:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
723 | // CHECK: [[SUB12:%.*]] = sub <2 x i64> [[TMP25]], [[TMP24]] |
724 | // CHECK: store volatile <2 x i64> [[SUB12]], <2 x i64>* @sl, align 8 |
725 | // CHECK: [[TMP26:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
726 | // CHECK: [[TMP27:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
727 | // CHECK: [[SUB13:%.*]] = sub <2 x i64> [[TMP27]], [[TMP26]] |
728 | // CHECK: store volatile <2 x i64> [[SUB13]], <2 x i64>* @sl, align 8 |
729 | // CHECK: [[TMP28:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
730 | // CHECK: [[TMP29:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
731 | // CHECK: [[SUB14:%.*]] = sub <2 x i64> [[TMP29]], [[TMP28]] |
732 | // CHECK: store volatile <2 x i64> [[SUB14]], <2 x i64>* @ul, align 8 |
733 | // CHECK: [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
734 | // CHECK: [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
735 | // CHECK: [[SUB15:%.*]] = sub <2 x i64> [[TMP31]], [[TMP30]] |
736 | // CHECK: store volatile <2 x i64> [[SUB15]], <2 x i64>* @ul, align 8 |
737 | // CHECK: [[TMP32:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
738 | // CHECK: [[TMP33:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
739 | // CHECK: [[SUB16:%.*]] = fsub <2 x double> [[TMP33]], [[TMP32]] |
740 | // CHECK: store volatile <2 x double> [[SUB16]], <2 x double>* @fd, align 8 |
741 | // CHECK: ret void |
742 | void test_sub_assign(void) { |
743 | |
744 | sc -= sc2; |
745 | sc -= bc2; |
746 | uc -= uc2; |
747 | uc -= bc2; |
748 | |
749 | ss -= ss2; |
750 | ss -= bs2; |
751 | us -= us2; |
752 | us -= bs2; |
753 | |
754 | si -= si2; |
755 | si -= bi2; |
756 | ui -= ui2; |
757 | ui -= bi2; |
758 | |
759 | sl -= sl2; |
760 | sl -= bl2; |
761 | ul -= ul2; |
762 | ul -= bl2; |
763 | |
764 | fd -= fd2; |
765 | } |
766 | |
767 | // CHECK-LABEL: define void @test_mul() #0 { |
768 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
769 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
770 | // CHECK: [[MUL:%.*]] = mul <16 x i8> [[TMP0]], [[TMP1]] |
771 | // CHECK: store volatile <16 x i8> [[MUL]], <16 x i8>* @sc, align 8 |
772 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
773 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
774 | // CHECK: [[MUL1:%.*]] = mul <16 x i8> [[TMP2]], [[TMP3]] |
775 | // CHECK: store volatile <16 x i8> [[MUL1]], <16 x i8>* @uc, align 8 |
776 | // CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
777 | // CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
778 | // CHECK: [[MUL2:%.*]] = mul <8 x i16> [[TMP4]], [[TMP5]] |
779 | // CHECK: store volatile <8 x i16> [[MUL2]], <8 x i16>* @ss, align 8 |
780 | // CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
781 | // CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
782 | // CHECK: [[MUL3:%.*]] = mul <8 x i16> [[TMP6]], [[TMP7]] |
783 | // CHECK: store volatile <8 x i16> [[MUL3]], <8 x i16>* @us, align 8 |
784 | // CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
785 | // CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
786 | // CHECK: [[MUL4:%.*]] = mul <4 x i32> [[TMP8]], [[TMP9]] |
787 | // CHECK: store volatile <4 x i32> [[MUL4]], <4 x i32>* @si, align 8 |
788 | // CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
789 | // CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
790 | // CHECK: [[MUL5:%.*]] = mul <4 x i32> [[TMP10]], [[TMP11]] |
791 | // CHECK: store volatile <4 x i32> [[MUL5]], <4 x i32>* @ui, align 8 |
792 | // CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
793 | // CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
794 | // CHECK: [[MUL6:%.*]] = mul <2 x i64> [[TMP12]], [[TMP13]] |
795 | // CHECK: store volatile <2 x i64> [[MUL6]], <2 x i64>* @sl, align 8 |
796 | // CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
797 | // CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
798 | // CHECK: [[MUL7:%.*]] = mul <2 x i64> [[TMP14]], [[TMP15]] |
799 | // CHECK: store volatile <2 x i64> [[MUL7]], <2 x i64>* @ul, align 8 |
800 | // CHECK: [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
801 | // CHECK: [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
802 | // CHECK: [[MUL8:%.*]] = fmul <2 x double> [[TMP16]], [[TMP17]] |
803 | // CHECK: store volatile <2 x double> [[MUL8]], <2 x double>* @fd, align 8 |
804 | // CHECK: ret void |
805 | void test_mul(void) { |
806 | |
807 | sc = sc * sc2; |
808 | uc = uc * uc2; |
809 | |
810 | ss = ss * ss2; |
811 | us = us * us2; |
812 | |
813 | si = si * si2; |
814 | ui = ui * ui2; |
815 | |
816 | sl = sl * sl2; |
817 | ul = ul * ul2; |
818 | |
819 | fd = fd * fd2; |
820 | } |
821 | |
822 | // CHECK-LABEL: define void @test_mul_assign() #0 { |
823 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
824 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
825 | // CHECK: [[MUL:%.*]] = mul <16 x i8> [[TMP1]], [[TMP0]] |
826 | // CHECK: store volatile <16 x i8> [[MUL]], <16 x i8>* @sc, align 8 |
827 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
828 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
829 | // CHECK: [[MUL1:%.*]] = mul <16 x i8> [[TMP3]], [[TMP2]] |
830 | // CHECK: store volatile <16 x i8> [[MUL1]], <16 x i8>* @uc, align 8 |
831 | // CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
832 | // CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
833 | // CHECK: [[MUL2:%.*]] = mul <8 x i16> [[TMP5]], [[TMP4]] |
834 | // CHECK: store volatile <8 x i16> [[MUL2]], <8 x i16>* @ss, align 8 |
835 | // CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
836 | // CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
837 | // CHECK: [[MUL3:%.*]] = mul <8 x i16> [[TMP7]], [[TMP6]] |
838 | // CHECK: store volatile <8 x i16> [[MUL3]], <8 x i16>* @us, align 8 |
839 | // CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
840 | // CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
841 | // CHECK: [[MUL4:%.*]] = mul <4 x i32> [[TMP9]], [[TMP8]] |
842 | // CHECK: store volatile <4 x i32> [[MUL4]], <4 x i32>* @si, align 8 |
843 | // CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
844 | // CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
845 | // CHECK: [[MUL5:%.*]] = mul <4 x i32> [[TMP11]], [[TMP10]] |
846 | // CHECK: store volatile <4 x i32> [[MUL5]], <4 x i32>* @ui, align 8 |
847 | // CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
848 | // CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
849 | // CHECK: [[MUL6:%.*]] = mul <2 x i64> [[TMP13]], [[TMP12]] |
850 | // CHECK: store volatile <2 x i64> [[MUL6]], <2 x i64>* @sl, align 8 |
851 | // CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
852 | // CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
853 | // CHECK: [[MUL7:%.*]] = mul <2 x i64> [[TMP15]], [[TMP14]] |
854 | // CHECK: store volatile <2 x i64> [[MUL7]], <2 x i64>* @ul, align 8 |
855 | // CHECK: [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
856 | // CHECK: [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
857 | // CHECK: [[MUL8:%.*]] = fmul <2 x double> [[TMP17]], [[TMP16]] |
858 | // CHECK: store volatile <2 x double> [[MUL8]], <2 x double>* @fd, align 8 |
859 | // CHECK: ret void |
860 | void test_mul_assign(void) { |
861 | |
862 | sc *= sc2; |
863 | uc *= uc2; |
864 | |
865 | ss *= ss2; |
866 | us *= us2; |
867 | |
868 | si *= si2; |
869 | ui *= ui2; |
870 | |
871 | sl *= sl2; |
872 | ul *= ul2; |
873 | |
874 | fd *= fd2; |
875 | } |
876 | |
877 | // CHECK-LABEL: define void @test_div() #0 { |
878 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
879 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
880 | // CHECK: [[DIV:%.*]] = sdiv <16 x i8> [[TMP0]], [[TMP1]] |
881 | // CHECK: store volatile <16 x i8> [[DIV]], <16 x i8>* @sc, align 8 |
882 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
883 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
884 | // CHECK: [[DIV1:%.*]] = udiv <16 x i8> [[TMP2]], [[TMP3]] |
885 | // CHECK: store volatile <16 x i8> [[DIV1]], <16 x i8>* @uc, align 8 |
886 | // CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
887 | // CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
888 | // CHECK: [[DIV2:%.*]] = sdiv <8 x i16> [[TMP4]], [[TMP5]] |
889 | // CHECK: store volatile <8 x i16> [[DIV2]], <8 x i16>* @ss, align 8 |
890 | // CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
891 | // CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
892 | // CHECK: [[DIV3:%.*]] = udiv <8 x i16> [[TMP6]], [[TMP7]] |
893 | // CHECK: store volatile <8 x i16> [[DIV3]], <8 x i16>* @us, align 8 |
894 | // CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
895 | // CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
896 | // CHECK: [[DIV4:%.*]] = sdiv <4 x i32> [[TMP8]], [[TMP9]] |
897 | // CHECK: store volatile <4 x i32> [[DIV4]], <4 x i32>* @si, align 8 |
898 | // CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
899 | // CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
900 | // CHECK: [[DIV5:%.*]] = udiv <4 x i32> [[TMP10]], [[TMP11]] |
901 | // CHECK: store volatile <4 x i32> [[DIV5]], <4 x i32>* @ui, align 8 |
902 | // CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
903 | // CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
904 | // CHECK: [[DIV6:%.*]] = sdiv <2 x i64> [[TMP12]], [[TMP13]] |
905 | // CHECK: store volatile <2 x i64> [[DIV6]], <2 x i64>* @sl, align 8 |
906 | // CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
907 | // CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
908 | // CHECK: [[DIV7:%.*]] = udiv <2 x i64> [[TMP14]], [[TMP15]] |
909 | // CHECK: store volatile <2 x i64> [[DIV7]], <2 x i64>* @ul, align 8 |
910 | // CHECK: [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
911 | // CHECK: [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
912 | // CHECK: [[DIV8:%.*]] = fdiv <2 x double> [[TMP16]], [[TMP17]] |
913 | // CHECK: store volatile <2 x double> [[DIV8]], <2 x double>* @fd, align 8 |
914 | // CHECK: ret void |
915 | void test_div(void) { |
916 | |
917 | sc = sc / sc2; |
918 | uc = uc / uc2; |
919 | |
920 | ss = ss / ss2; |
921 | us = us / us2; |
922 | |
923 | si = si / si2; |
924 | ui = ui / ui2; |
925 | |
926 | sl = sl / sl2; |
927 | ul = ul / ul2; |
928 | |
929 | fd = fd / fd2; |
930 | } |
931 | |
932 | // CHECK-LABEL: define void @test_div_assign() #0 { |
933 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
934 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
935 | // CHECK: [[DIV:%.*]] = sdiv <16 x i8> [[TMP1]], [[TMP0]] |
936 | // CHECK: store volatile <16 x i8> [[DIV]], <16 x i8>* @sc, align 8 |
937 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
938 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
939 | // CHECK: [[DIV1:%.*]] = udiv <16 x i8> [[TMP3]], [[TMP2]] |
940 | // CHECK: store volatile <16 x i8> [[DIV1]], <16 x i8>* @uc, align 8 |
941 | // CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
942 | // CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
943 | // CHECK: [[DIV2:%.*]] = sdiv <8 x i16> [[TMP5]], [[TMP4]] |
944 | // CHECK: store volatile <8 x i16> [[DIV2]], <8 x i16>* @ss, align 8 |
945 | // CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
946 | // CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
947 | // CHECK: [[DIV3:%.*]] = udiv <8 x i16> [[TMP7]], [[TMP6]] |
948 | // CHECK: store volatile <8 x i16> [[DIV3]], <8 x i16>* @us, align 8 |
949 | // CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
950 | // CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
951 | // CHECK: [[DIV4:%.*]] = sdiv <4 x i32> [[TMP9]], [[TMP8]] |
952 | // CHECK: store volatile <4 x i32> [[DIV4]], <4 x i32>* @si, align 8 |
953 | // CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
954 | // CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
955 | // CHECK: [[DIV5:%.*]] = udiv <4 x i32> [[TMP11]], [[TMP10]] |
956 | // CHECK: store volatile <4 x i32> [[DIV5]], <4 x i32>* @ui, align 8 |
957 | // CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
958 | // CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
959 | // CHECK: [[DIV6:%.*]] = sdiv <2 x i64> [[TMP13]], [[TMP12]] |
960 | // CHECK: store volatile <2 x i64> [[DIV6]], <2 x i64>* @sl, align 8 |
961 | // CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
962 | // CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
963 | // CHECK: [[DIV7:%.*]] = udiv <2 x i64> [[TMP15]], [[TMP14]] |
964 | // CHECK: store volatile <2 x i64> [[DIV7]], <2 x i64>* @ul, align 8 |
965 | // CHECK: [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
966 | // CHECK: [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
967 | // CHECK: [[DIV8:%.*]] = fdiv <2 x double> [[TMP17]], [[TMP16]] |
968 | // CHECK: store volatile <2 x double> [[DIV8]], <2 x double>* @fd, align 8 |
969 | // CHECK: ret void |
970 | void test_div_assign(void) { |
971 | |
972 | sc /= sc2; |
973 | uc /= uc2; |
974 | |
975 | ss /= ss2; |
976 | us /= us2; |
977 | |
978 | si /= si2; |
979 | ui /= ui2; |
980 | |
981 | sl /= sl2; |
982 | ul /= ul2; |
983 | |
984 | fd /= fd2; |
985 | } |
986 | |
987 | // CHECK-LABEL: define void @test_rem() #0 { |
988 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
989 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
990 | // CHECK: [[REM:%.*]] = srem <16 x i8> [[TMP0]], [[TMP1]] |
991 | // CHECK: store volatile <16 x i8> [[REM]], <16 x i8>* @sc, align 8 |
992 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
993 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
994 | // CHECK: [[REM1:%.*]] = urem <16 x i8> [[TMP2]], [[TMP3]] |
995 | // CHECK: store volatile <16 x i8> [[REM1]], <16 x i8>* @uc, align 8 |
996 | // CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
997 | // CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
998 | // CHECK: [[REM2:%.*]] = srem <8 x i16> [[TMP4]], [[TMP5]] |
999 | // CHECK: store volatile <8 x i16> [[REM2]], <8 x i16>* @ss, align 8 |
1000 | // CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1001 | // CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1002 | // CHECK: [[REM3:%.*]] = urem <8 x i16> [[TMP6]], [[TMP7]] |
1003 | // CHECK: store volatile <8 x i16> [[REM3]], <8 x i16>* @us, align 8 |
1004 | // CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1005 | // CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1006 | // CHECK: [[REM4:%.*]] = srem <4 x i32> [[TMP8]], [[TMP9]] |
1007 | // CHECK: store volatile <4 x i32> [[REM4]], <4 x i32>* @si, align 8 |
1008 | // CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1009 | // CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1010 | // CHECK: [[REM5:%.*]] = urem <4 x i32> [[TMP10]], [[TMP11]] |
1011 | // CHECK: store volatile <4 x i32> [[REM5]], <4 x i32>* @ui, align 8 |
1012 | // CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1013 | // CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1014 | // CHECK: [[REM6:%.*]] = srem <2 x i64> [[TMP12]], [[TMP13]] |
1015 | // CHECK: store volatile <2 x i64> [[REM6]], <2 x i64>* @sl, align 8 |
1016 | // CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1017 | // CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1018 | // CHECK: [[REM7:%.*]] = urem <2 x i64> [[TMP14]], [[TMP15]] |
1019 | // CHECK: store volatile <2 x i64> [[REM7]], <2 x i64>* @ul, align 8 |
1020 | // CHECK: ret void |
1021 | void test_rem(void) { |
1022 | |
1023 | sc = sc % sc2; |
1024 | uc = uc % uc2; |
1025 | |
1026 | ss = ss % ss2; |
1027 | us = us % us2; |
1028 | |
1029 | si = si % si2; |
1030 | ui = ui % ui2; |
1031 | |
1032 | sl = sl % sl2; |
1033 | ul = ul % ul2; |
1034 | } |
1035 | |
1036 | // CHECK-LABEL: define void @test_rem_assign() #0 { |
1037 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1038 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1039 | // CHECK: [[REM:%.*]] = srem <16 x i8> [[TMP1]], [[TMP0]] |
1040 | // CHECK: store volatile <16 x i8> [[REM]], <16 x i8>* @sc, align 8 |
1041 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1042 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1043 | // CHECK: [[REM1:%.*]] = urem <16 x i8> [[TMP3]], [[TMP2]] |
1044 | // CHECK: store volatile <16 x i8> [[REM1]], <16 x i8>* @uc, align 8 |
1045 | // CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1046 | // CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1047 | // CHECK: [[REM2:%.*]] = srem <8 x i16> [[TMP5]], [[TMP4]] |
1048 | // CHECK: store volatile <8 x i16> [[REM2]], <8 x i16>* @ss, align 8 |
1049 | // CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1050 | // CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1051 | // CHECK: [[REM3:%.*]] = urem <8 x i16> [[TMP7]], [[TMP6]] |
1052 | // CHECK: store volatile <8 x i16> [[REM3]], <8 x i16>* @us, align 8 |
1053 | // CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1054 | // CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1055 | // CHECK: [[REM4:%.*]] = srem <4 x i32> [[TMP9]], [[TMP8]] |
1056 | // CHECK: store volatile <4 x i32> [[REM4]], <4 x i32>* @si, align 8 |
1057 | // CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1058 | // CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1059 | // CHECK: [[REM5:%.*]] = urem <4 x i32> [[TMP11]], [[TMP10]] |
1060 | // CHECK: store volatile <4 x i32> [[REM5]], <4 x i32>* @ui, align 8 |
1061 | // CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1062 | // CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1063 | // CHECK: [[REM6:%.*]] = srem <2 x i64> [[TMP13]], [[TMP12]] |
1064 | // CHECK: store volatile <2 x i64> [[REM6]], <2 x i64>* @sl, align 8 |
1065 | // CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1066 | // CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1067 | // CHECK: [[REM7:%.*]] = urem <2 x i64> [[TMP15]], [[TMP14]] |
1068 | // CHECK: store volatile <2 x i64> [[REM7]], <2 x i64>* @ul, align 8 |
1069 | // CHECK: ret void |
1070 | void test_rem_assign(void) { |
1071 | |
1072 | sc %= sc2; |
1073 | uc %= uc2; |
1074 | |
1075 | ss %= ss2; |
1076 | us %= us2; |
1077 | |
1078 | si %= si2; |
1079 | ui %= ui2; |
1080 | |
1081 | sl %= sl2; |
1082 | ul %= ul2; |
1083 | } |
1084 | |
1085 | // CHECK-LABEL: define void @test_not() #0 { |
1086 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1087 | // CHECK: [[NEG:%.*]] = xor <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1088 | // CHECK: store volatile <16 x i8> [[NEG]], <16 x i8>* @sc, align 8 |
1089 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1090 | // CHECK: [[NEG1:%.*]] = xor <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1091 | // CHECK: store volatile <16 x i8> [[NEG1]], <16 x i8>* @uc, align 8 |
1092 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1093 | // CHECK: [[NEG2:%.*]] = xor <16 x i8> [[TMP2]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> |
1094 | // CHECK: store volatile <16 x i8> [[NEG2]], <16 x i8>* @bc, align 8 |
1095 | // CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1096 | // CHECK: [[NEG3:%.*]] = xor <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1097 | // CHECK: store volatile <8 x i16> [[NEG3]], <8 x i16>* @ss, align 8 |
1098 | // CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1099 | // CHECK: [[NEG4:%.*]] = xor <8 x i16> [[TMP4]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1100 | // CHECK: store volatile <8 x i16> [[NEG4]], <8 x i16>* @us, align 8 |
1101 | // CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1102 | // CHECK: [[NEG5:%.*]] = xor <8 x i16> [[TMP5]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> |
1103 | // CHECK: store volatile <8 x i16> [[NEG5]], <8 x i16>* @bs, align 8 |
1104 | // CHECK: [[TMP6:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1105 | // CHECK: [[NEG6:%.*]] = xor <4 x i32> [[TMP6]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1106 | // CHECK: store volatile <4 x i32> [[NEG6]], <4 x i32>* @si, align 8 |
1107 | // CHECK: [[TMP7:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1108 | // CHECK: [[NEG7:%.*]] = xor <4 x i32> [[TMP7]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1109 | // CHECK: store volatile <4 x i32> [[NEG7]], <4 x i32>* @ui, align 8 |
1110 | // CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1111 | // CHECK: [[NEG8:%.*]] = xor <4 x i32> [[TMP8]], <i32 -1, i32 -1, i32 -1, i32 -1> |
1112 | // CHECK: store volatile <4 x i32> [[NEG8]], <4 x i32>* @bi, align 8 |
1113 | // CHECK: [[TMP9:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1114 | // CHECK: [[NEG9:%.*]] = xor <2 x i64> [[TMP9]], <i64 -1, i64 -1> |
1115 | // CHECK: store volatile <2 x i64> [[NEG9]], <2 x i64>* @sl, align 8 |
1116 | // CHECK: [[TMP10:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1117 | // CHECK: [[NEG10:%.*]] = xor <2 x i64> [[TMP10]], <i64 -1, i64 -1> |
1118 | // CHECK: store volatile <2 x i64> [[NEG10]], <2 x i64>* @ul, align 8 |
1119 | // CHECK: [[TMP11:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1120 | // CHECK: [[NEG11:%.*]] = xor <2 x i64> [[TMP11]], <i64 -1, i64 -1> |
1121 | // CHECK: store volatile <2 x i64> [[NEG11]], <2 x i64>* @bl, align 8 |
1122 | // CHECK: ret void |
1123 | void test_not(void) { |
1124 | |
1125 | sc = ~sc2; |
1126 | uc = ~uc2; |
1127 | bc = ~bc2; |
1128 | |
1129 | ss = ~ss2; |
1130 | us = ~us2; |
1131 | bs = ~bs2; |
1132 | |
1133 | si = ~si2; |
1134 | ui = ~ui2; |
1135 | bi = ~bi2; |
1136 | |
1137 | sl = ~sl2; |
1138 | ul = ~ul2; |
1139 | bl = ~bl2; |
1140 | } |
1141 | |
1142 | // CHECK-LABEL: define void @test_and() #0 { |
1143 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1144 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1145 | // CHECK: [[AND:%.*]] = and <16 x i8> [[TMP0]], [[TMP1]] |
1146 | // CHECK: store volatile <16 x i8> [[AND]], <16 x i8>* @sc, align 8 |
1147 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1148 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1149 | // CHECK: [[AND1:%.*]] = and <16 x i8> [[TMP2]], [[TMP3]] |
1150 | // CHECK: store volatile <16 x i8> [[AND1]], <16 x i8>* @sc, align 8 |
1151 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1152 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1153 | // CHECK: [[AND2:%.*]] = and <16 x i8> [[TMP4]], [[TMP5]] |
1154 | // CHECK: store volatile <16 x i8> [[AND2]], <16 x i8>* @sc, align 8 |
1155 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1156 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1157 | // CHECK: [[AND3:%.*]] = and <16 x i8> [[TMP6]], [[TMP7]] |
1158 | // CHECK: store volatile <16 x i8> [[AND3]], <16 x i8>* @uc, align 8 |
1159 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1160 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1161 | // CHECK: [[AND4:%.*]] = and <16 x i8> [[TMP8]], [[TMP9]] |
1162 | // CHECK: store volatile <16 x i8> [[AND4]], <16 x i8>* @uc, align 8 |
1163 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1164 | // CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1165 | // CHECK: [[AND5:%.*]] = and <16 x i8> [[TMP10]], [[TMP11]] |
1166 | // CHECK: store volatile <16 x i8> [[AND5]], <16 x i8>* @uc, align 8 |
1167 | // CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1168 | // CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1169 | // CHECK: [[AND6:%.*]] = and <16 x i8> [[TMP12]], [[TMP13]] |
1170 | // CHECK: store volatile <16 x i8> [[AND6]], <16 x i8>* @bc, align 8 |
1171 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1172 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1173 | // CHECK: [[AND7:%.*]] = and <8 x i16> [[TMP14]], [[TMP15]] |
1174 | // CHECK: store volatile <8 x i16> [[AND7]], <8 x i16>* @ss, align 8 |
1175 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1176 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1177 | // CHECK: [[AND8:%.*]] = and <8 x i16> [[TMP16]], [[TMP17]] |
1178 | // CHECK: store volatile <8 x i16> [[AND8]], <8 x i16>* @ss, align 8 |
1179 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1180 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1181 | // CHECK: [[AND9:%.*]] = and <8 x i16> [[TMP18]], [[TMP19]] |
1182 | // CHECK: store volatile <8 x i16> [[AND9]], <8 x i16>* @ss, align 8 |
1183 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1184 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1185 | // CHECK: [[AND10:%.*]] = and <8 x i16> [[TMP20]], [[TMP21]] |
1186 | // CHECK: store volatile <8 x i16> [[AND10]], <8 x i16>* @us, align 8 |
1187 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1188 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1189 | // CHECK: [[AND11:%.*]] = and <8 x i16> [[TMP22]], [[TMP23]] |
1190 | // CHECK: store volatile <8 x i16> [[AND11]], <8 x i16>* @us, align 8 |
1191 | // CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1192 | // CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1193 | // CHECK: [[AND12:%.*]] = and <8 x i16> [[TMP24]], [[TMP25]] |
1194 | // CHECK: store volatile <8 x i16> [[AND12]], <8 x i16>* @us, align 8 |
1195 | // CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1196 | // CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1197 | // CHECK: [[AND13:%.*]] = and <8 x i16> [[TMP26]], [[TMP27]] |
1198 | // CHECK: store volatile <8 x i16> [[AND13]], <8 x i16>* @bs, align 8 |
1199 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1200 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1201 | // CHECK: [[AND14:%.*]] = and <4 x i32> [[TMP28]], [[TMP29]] |
1202 | // CHECK: store volatile <4 x i32> [[AND14]], <4 x i32>* @si, align 8 |
1203 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1204 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1205 | // CHECK: [[AND15:%.*]] = and <4 x i32> [[TMP30]], [[TMP31]] |
1206 | // CHECK: store volatile <4 x i32> [[AND15]], <4 x i32>* @si, align 8 |
1207 | // CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1208 | // CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1209 | // CHECK: [[AND16:%.*]] = and <4 x i32> [[TMP32]], [[TMP33]] |
1210 | // CHECK: store volatile <4 x i32> [[AND16]], <4 x i32>* @si, align 8 |
1211 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1212 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1213 | // CHECK: [[AND17:%.*]] = and <4 x i32> [[TMP34]], [[TMP35]] |
1214 | // CHECK: store volatile <4 x i32> [[AND17]], <4 x i32>* @ui, align 8 |
1215 | // CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1216 | // CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1217 | // CHECK: [[AND18:%.*]] = and <4 x i32> [[TMP36]], [[TMP37]] |
1218 | // CHECK: store volatile <4 x i32> [[AND18]], <4 x i32>* @ui, align 8 |
1219 | // CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1220 | // CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1221 | // CHECK: [[AND19:%.*]] = and <4 x i32> [[TMP38]], [[TMP39]] |
1222 | // CHECK: store volatile <4 x i32> [[AND19]], <4 x i32>* @ui, align 8 |
1223 | // CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1224 | // CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1225 | // CHECK: [[AND20:%.*]] = and <4 x i32> [[TMP40]], [[TMP41]] |
1226 | // CHECK: store volatile <4 x i32> [[AND20]], <4 x i32>* @bi, align 8 |
1227 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1228 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1229 | // CHECK: [[AND21:%.*]] = and <2 x i64> [[TMP42]], [[TMP43]] |
1230 | // CHECK: store volatile <2 x i64> [[AND21]], <2 x i64>* @sl, align 8 |
1231 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1232 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1233 | // CHECK: [[AND22:%.*]] = and <2 x i64> [[TMP44]], [[TMP45]] |
1234 | // CHECK: store volatile <2 x i64> [[AND22]], <2 x i64>* @sl, align 8 |
1235 | // CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1236 | // CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1237 | // CHECK: [[AND23:%.*]] = and <2 x i64> [[TMP46]], [[TMP47]] |
1238 | // CHECK: store volatile <2 x i64> [[AND23]], <2 x i64>* @sl, align 8 |
1239 | // CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1240 | // CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1241 | // CHECK: [[AND24:%.*]] = and <2 x i64> [[TMP48]], [[TMP49]] |
1242 | // CHECK: store volatile <2 x i64> [[AND24]], <2 x i64>* @ul, align 8 |
1243 | // CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1244 | // CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1245 | // CHECK: [[AND25:%.*]] = and <2 x i64> [[TMP50]], [[TMP51]] |
1246 | // CHECK: store volatile <2 x i64> [[AND25]], <2 x i64>* @ul, align 8 |
1247 | // CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1248 | // CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1249 | // CHECK: [[AND26:%.*]] = and <2 x i64> [[TMP52]], [[TMP53]] |
1250 | // CHECK: store volatile <2 x i64> [[AND26]], <2 x i64>* @ul, align 8 |
1251 | // CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1252 | // CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1253 | // CHECK: [[AND27:%.*]] = and <2 x i64> [[TMP54]], [[TMP55]] |
1254 | // CHECK: store volatile <2 x i64> [[AND27]], <2 x i64>* @bl, align 8 |
1255 | // CHECK: ret void |
1256 | void test_and(void) { |
1257 | |
1258 | sc = sc & sc2; |
1259 | sc = sc & bc2; |
1260 | sc = bc & sc2; |
1261 | uc = uc & uc2; |
1262 | uc = uc & bc2; |
1263 | uc = bc & uc2; |
1264 | bc = bc & bc2; |
1265 | |
1266 | ss = ss & ss2; |
1267 | ss = ss & bs2; |
1268 | ss = bs & ss2; |
1269 | us = us & us2; |
1270 | us = us & bs2; |
1271 | us = bs & us2; |
1272 | bs = bs & bs2; |
1273 | |
1274 | si = si & si2; |
1275 | si = si & bi2; |
1276 | si = bi & si2; |
1277 | ui = ui & ui2; |
1278 | ui = ui & bi2; |
1279 | ui = bi & ui2; |
1280 | bi = bi & bi2; |
1281 | |
1282 | sl = sl & sl2; |
1283 | sl = sl & bl2; |
1284 | sl = bl & sl2; |
1285 | ul = ul & ul2; |
1286 | ul = ul & bl2; |
1287 | ul = bl & ul2; |
1288 | bl = bl & bl2; |
1289 | } |
1290 | |
1291 | // CHECK-LABEL: define void @test_and_assign() #0 { |
1292 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1293 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1294 | // CHECK: [[AND:%.*]] = and <16 x i8> [[TMP1]], [[TMP0]] |
1295 | // CHECK: store volatile <16 x i8> [[AND]], <16 x i8>* @sc, align 8 |
1296 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1297 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1298 | // CHECK: [[AND1:%.*]] = and <16 x i8> [[TMP3]], [[TMP2]] |
1299 | // CHECK: store volatile <16 x i8> [[AND1]], <16 x i8>* @sc, align 8 |
1300 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1301 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1302 | // CHECK: [[AND2:%.*]] = and <16 x i8> [[TMP5]], [[TMP4]] |
1303 | // CHECK: store volatile <16 x i8> [[AND2]], <16 x i8>* @uc, align 8 |
1304 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1305 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1306 | // CHECK: [[AND3:%.*]] = and <16 x i8> [[TMP7]], [[TMP6]] |
1307 | // CHECK: store volatile <16 x i8> [[AND3]], <16 x i8>* @uc, align 8 |
1308 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1309 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1310 | // CHECK: [[AND4:%.*]] = and <16 x i8> [[TMP9]], [[TMP8]] |
1311 | // CHECK: store volatile <16 x i8> [[AND4]], <16 x i8>* @bc, align 8 |
1312 | // CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1313 | // CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1314 | // CHECK: [[AND5:%.*]] = and <8 x i16> [[TMP11]], [[TMP10]] |
1315 | // CHECK: store volatile <8 x i16> [[AND5]], <8 x i16>* @ss, align 8 |
1316 | // CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1317 | // CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1318 | // CHECK: [[AND6:%.*]] = and <8 x i16> [[TMP13]], [[TMP12]] |
1319 | // CHECK: store volatile <8 x i16> [[AND6]], <8 x i16>* @ss, align 8 |
1320 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1321 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1322 | // CHECK: [[AND7:%.*]] = and <8 x i16> [[TMP15]], [[TMP14]] |
1323 | // CHECK: store volatile <8 x i16> [[AND7]], <8 x i16>* @us, align 8 |
1324 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1325 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1326 | // CHECK: [[AND8:%.*]] = and <8 x i16> [[TMP17]], [[TMP16]] |
1327 | // CHECK: store volatile <8 x i16> [[AND8]], <8 x i16>* @us, align 8 |
1328 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1329 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1330 | // CHECK: [[AND9:%.*]] = and <8 x i16> [[TMP19]], [[TMP18]] |
1331 | // CHECK: store volatile <8 x i16> [[AND9]], <8 x i16>* @bs, align 8 |
1332 | // CHECK: [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1333 | // CHECK: [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1334 | // CHECK: [[AND10:%.*]] = and <4 x i32> [[TMP21]], [[TMP20]] |
1335 | // CHECK: store volatile <4 x i32> [[AND10]], <4 x i32>* @si, align 8 |
1336 | // CHECK: [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1337 | // CHECK: [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1338 | // CHECK: [[AND11:%.*]] = and <4 x i32> [[TMP23]], [[TMP22]] |
1339 | // CHECK: store volatile <4 x i32> [[AND11]], <4 x i32>* @si, align 8 |
1340 | // CHECK: [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1341 | // CHECK: [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1342 | // CHECK: [[AND12:%.*]] = and <4 x i32> [[TMP25]], [[TMP24]] |
1343 | // CHECK: store volatile <4 x i32> [[AND12]], <4 x i32>* @ui, align 8 |
1344 | // CHECK: [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1345 | // CHECK: [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1346 | // CHECK: [[AND13:%.*]] = and <4 x i32> [[TMP27]], [[TMP26]] |
1347 | // CHECK: store volatile <4 x i32> [[AND13]], <4 x i32>* @ui, align 8 |
1348 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1349 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1350 | // CHECK: [[AND14:%.*]] = and <4 x i32> [[TMP29]], [[TMP28]] |
1351 | // CHECK: store volatile <4 x i32> [[AND14]], <4 x i32>* @bi, align 8 |
1352 | // CHECK: [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1353 | // CHECK: [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1354 | // CHECK: [[AND15:%.*]] = and <2 x i64> [[TMP31]], [[TMP30]] |
1355 | // CHECK: store volatile <2 x i64> [[AND15]], <2 x i64>* @sl, align 8 |
1356 | // CHECK: [[TMP32:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1357 | // CHECK: [[TMP33:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1358 | // CHECK: [[AND16:%.*]] = and <2 x i64> [[TMP33]], [[TMP32]] |
1359 | // CHECK: store volatile <2 x i64> [[AND16]], <2 x i64>* @sl, align 8 |
1360 | // CHECK: [[TMP34:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1361 | // CHECK: [[TMP35:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1362 | // CHECK: [[AND17:%.*]] = and <2 x i64> [[TMP35]], [[TMP34]] |
1363 | // CHECK: store volatile <2 x i64> [[AND17]], <2 x i64>* @ul, align 8 |
1364 | // CHECK: [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1365 | // CHECK: [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1366 | // CHECK: [[AND18:%.*]] = and <2 x i64> [[TMP37]], [[TMP36]] |
1367 | // CHECK: store volatile <2 x i64> [[AND18]], <2 x i64>* @ul, align 8 |
1368 | // CHECK: [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1369 | // CHECK: [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1370 | // CHECK: [[AND19:%.*]] = and <2 x i64> [[TMP39]], [[TMP38]] |
1371 | // CHECK: store volatile <2 x i64> [[AND19]], <2 x i64>* @bl, align 8 |
1372 | // CHECK: ret void |
1373 | void test_and_assign(void) { |
1374 | |
1375 | sc &= sc2; |
1376 | sc &= bc2; |
1377 | uc &= uc2; |
1378 | uc &= bc2; |
1379 | bc &= bc2; |
1380 | |
1381 | ss &= ss2; |
1382 | ss &= bs2; |
1383 | us &= us2; |
1384 | us &= bs2; |
1385 | bs &= bs2; |
1386 | |
1387 | si &= si2; |
1388 | si &= bi2; |
1389 | ui &= ui2; |
1390 | ui &= bi2; |
1391 | bi &= bi2; |
1392 | |
1393 | sl &= sl2; |
1394 | sl &= bl2; |
1395 | ul &= ul2; |
1396 | ul &= bl2; |
1397 | bl &= bl2; |
1398 | } |
1399 | |
1400 | // CHECK-LABEL: define void @test_or() #0 { |
1401 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1402 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1403 | // CHECK: [[OR:%.*]] = or <16 x i8> [[TMP0]], [[TMP1]] |
1404 | // CHECK: store volatile <16 x i8> [[OR]], <16 x i8>* @sc, align 8 |
1405 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1406 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1407 | // CHECK: [[OR1:%.*]] = or <16 x i8> [[TMP2]], [[TMP3]] |
1408 | // CHECK: store volatile <16 x i8> [[OR1]], <16 x i8>* @sc, align 8 |
1409 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1410 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1411 | // CHECK: [[OR2:%.*]] = or <16 x i8> [[TMP4]], [[TMP5]] |
1412 | // CHECK: store volatile <16 x i8> [[OR2]], <16 x i8>* @sc, align 8 |
1413 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1414 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1415 | // CHECK: [[OR3:%.*]] = or <16 x i8> [[TMP6]], [[TMP7]] |
1416 | // CHECK: store volatile <16 x i8> [[OR3]], <16 x i8>* @uc, align 8 |
1417 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1418 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1419 | // CHECK: [[OR4:%.*]] = or <16 x i8> [[TMP8]], [[TMP9]] |
1420 | // CHECK: store volatile <16 x i8> [[OR4]], <16 x i8>* @uc, align 8 |
1421 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1422 | // CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1423 | // CHECK: [[OR5:%.*]] = or <16 x i8> [[TMP10]], [[TMP11]] |
1424 | // CHECK: store volatile <16 x i8> [[OR5]], <16 x i8>* @uc, align 8 |
1425 | // CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1426 | // CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1427 | // CHECK: [[OR6:%.*]] = or <16 x i8> [[TMP12]], [[TMP13]] |
1428 | // CHECK: store volatile <16 x i8> [[OR6]], <16 x i8>* @bc, align 8 |
1429 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1430 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1431 | // CHECK: [[OR7:%.*]] = or <8 x i16> [[TMP14]], [[TMP15]] |
1432 | // CHECK: store volatile <8 x i16> [[OR7]], <8 x i16>* @ss, align 8 |
1433 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1434 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1435 | // CHECK: [[OR8:%.*]] = or <8 x i16> [[TMP16]], [[TMP17]] |
1436 | // CHECK: store volatile <8 x i16> [[OR8]], <8 x i16>* @ss, align 8 |
1437 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1438 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1439 | // CHECK: [[OR9:%.*]] = or <8 x i16> [[TMP18]], [[TMP19]] |
1440 | // CHECK: store volatile <8 x i16> [[OR9]], <8 x i16>* @ss, align 8 |
1441 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1442 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1443 | // CHECK: [[OR10:%.*]] = or <8 x i16> [[TMP20]], [[TMP21]] |
1444 | // CHECK: store volatile <8 x i16> [[OR10]], <8 x i16>* @us, align 8 |
1445 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1446 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1447 | // CHECK: [[OR11:%.*]] = or <8 x i16> [[TMP22]], [[TMP23]] |
1448 | // CHECK: store volatile <8 x i16> [[OR11]], <8 x i16>* @us, align 8 |
1449 | // CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1450 | // CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1451 | // CHECK: [[OR12:%.*]] = or <8 x i16> [[TMP24]], [[TMP25]] |
1452 | // CHECK: store volatile <8 x i16> [[OR12]], <8 x i16>* @us, align 8 |
1453 | // CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1454 | // CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1455 | // CHECK: [[OR13:%.*]] = or <8 x i16> [[TMP26]], [[TMP27]] |
1456 | // CHECK: store volatile <8 x i16> [[OR13]], <8 x i16>* @bs, align 8 |
1457 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1458 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1459 | // CHECK: [[OR14:%.*]] = or <4 x i32> [[TMP28]], [[TMP29]] |
1460 | // CHECK: store volatile <4 x i32> [[OR14]], <4 x i32>* @si, align 8 |
1461 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1462 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1463 | // CHECK: [[OR15:%.*]] = or <4 x i32> [[TMP30]], [[TMP31]] |
1464 | // CHECK: store volatile <4 x i32> [[OR15]], <4 x i32>* @si, align 8 |
1465 | // CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1466 | // CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1467 | // CHECK: [[OR16:%.*]] = or <4 x i32> [[TMP32]], [[TMP33]] |
1468 | // CHECK: store volatile <4 x i32> [[OR16]], <4 x i32>* @si, align 8 |
1469 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1470 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1471 | // CHECK: [[OR17:%.*]] = or <4 x i32> [[TMP34]], [[TMP35]] |
1472 | // CHECK: store volatile <4 x i32> [[OR17]], <4 x i32>* @ui, align 8 |
1473 | // CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1474 | // CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1475 | // CHECK: [[OR18:%.*]] = or <4 x i32> [[TMP36]], [[TMP37]] |
1476 | // CHECK: store volatile <4 x i32> [[OR18]], <4 x i32>* @ui, align 8 |
1477 | // CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1478 | // CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1479 | // CHECK: [[OR19:%.*]] = or <4 x i32> [[TMP38]], [[TMP39]] |
1480 | // CHECK: store volatile <4 x i32> [[OR19]], <4 x i32>* @ui, align 8 |
1481 | // CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1482 | // CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1483 | // CHECK: [[OR20:%.*]] = or <4 x i32> [[TMP40]], [[TMP41]] |
1484 | // CHECK: store volatile <4 x i32> [[OR20]], <4 x i32>* @bi, align 8 |
1485 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1486 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1487 | // CHECK: [[OR21:%.*]] = or <2 x i64> [[TMP42]], [[TMP43]] |
1488 | // CHECK: store volatile <2 x i64> [[OR21]], <2 x i64>* @sl, align 8 |
1489 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1490 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1491 | // CHECK: [[OR22:%.*]] = or <2 x i64> [[TMP44]], [[TMP45]] |
1492 | // CHECK: store volatile <2 x i64> [[OR22]], <2 x i64>* @sl, align 8 |
1493 | // CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1494 | // CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1495 | // CHECK: [[OR23:%.*]] = or <2 x i64> [[TMP46]], [[TMP47]] |
1496 | // CHECK: store volatile <2 x i64> [[OR23]], <2 x i64>* @sl, align 8 |
1497 | // CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1498 | // CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1499 | // CHECK: [[OR24:%.*]] = or <2 x i64> [[TMP48]], [[TMP49]] |
1500 | // CHECK: store volatile <2 x i64> [[OR24]], <2 x i64>* @ul, align 8 |
1501 | // CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1502 | // CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1503 | // CHECK: [[OR25:%.*]] = or <2 x i64> [[TMP50]], [[TMP51]] |
1504 | // CHECK: store volatile <2 x i64> [[OR25]], <2 x i64>* @ul, align 8 |
1505 | // CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1506 | // CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1507 | // CHECK: [[OR26:%.*]] = or <2 x i64> [[TMP52]], [[TMP53]] |
1508 | // CHECK: store volatile <2 x i64> [[OR26]], <2 x i64>* @ul, align 8 |
1509 | // CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1510 | // CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1511 | // CHECK: [[OR27:%.*]] = or <2 x i64> [[TMP54]], [[TMP55]] |
1512 | // CHECK: store volatile <2 x i64> [[OR27]], <2 x i64>* @bl, align 8 |
1513 | // CHECK: ret void |
1514 | void test_or(void) { |
1515 | |
1516 | sc = sc | sc2; |
1517 | sc = sc | bc2; |
1518 | sc = bc | sc2; |
1519 | uc = uc | uc2; |
1520 | uc = uc | bc2; |
1521 | uc = bc | uc2; |
1522 | bc = bc | bc2; |
1523 | |
1524 | ss = ss | ss2; |
1525 | ss = ss | bs2; |
1526 | ss = bs | ss2; |
1527 | us = us | us2; |
1528 | us = us | bs2; |
1529 | us = bs | us2; |
1530 | bs = bs | bs2; |
1531 | |
1532 | si = si | si2; |
1533 | si = si | bi2; |
1534 | si = bi | si2; |
1535 | ui = ui | ui2; |
1536 | ui = ui | bi2; |
1537 | ui = bi | ui2; |
1538 | bi = bi | bi2; |
1539 | |
1540 | sl = sl | sl2; |
1541 | sl = sl | bl2; |
1542 | sl = bl | sl2; |
1543 | ul = ul | ul2; |
1544 | ul = ul | bl2; |
1545 | ul = bl | ul2; |
1546 | bl = bl | bl2; |
1547 | } |
1548 | |
1549 | // CHECK-LABEL: define void @test_or_assign() #0 { |
1550 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1551 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1552 | // CHECK: [[OR:%.*]] = or <16 x i8> [[TMP1]], [[TMP0]] |
1553 | // CHECK: store volatile <16 x i8> [[OR]], <16 x i8>* @sc, align 8 |
1554 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1555 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1556 | // CHECK: [[OR1:%.*]] = or <16 x i8> [[TMP3]], [[TMP2]] |
1557 | // CHECK: store volatile <16 x i8> [[OR1]], <16 x i8>* @sc, align 8 |
1558 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1559 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1560 | // CHECK: [[OR2:%.*]] = or <16 x i8> [[TMP5]], [[TMP4]] |
1561 | // CHECK: store volatile <16 x i8> [[OR2]], <16 x i8>* @uc, align 8 |
1562 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1563 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1564 | // CHECK: [[OR3:%.*]] = or <16 x i8> [[TMP7]], [[TMP6]] |
1565 | // CHECK: store volatile <16 x i8> [[OR3]], <16 x i8>* @uc, align 8 |
1566 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1567 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1568 | // CHECK: [[OR4:%.*]] = or <16 x i8> [[TMP9]], [[TMP8]] |
1569 | // CHECK: store volatile <16 x i8> [[OR4]], <16 x i8>* @bc, align 8 |
1570 | // CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1571 | // CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1572 | // CHECK: [[OR5:%.*]] = or <8 x i16> [[TMP11]], [[TMP10]] |
1573 | // CHECK: store volatile <8 x i16> [[OR5]], <8 x i16>* @ss, align 8 |
1574 | // CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1575 | // CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1576 | // CHECK: [[OR6:%.*]] = or <8 x i16> [[TMP13]], [[TMP12]] |
1577 | // CHECK: store volatile <8 x i16> [[OR6]], <8 x i16>* @ss, align 8 |
1578 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1579 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1580 | // CHECK: [[OR7:%.*]] = or <8 x i16> [[TMP15]], [[TMP14]] |
1581 | // CHECK: store volatile <8 x i16> [[OR7]], <8 x i16>* @us, align 8 |
1582 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1583 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1584 | // CHECK: [[OR8:%.*]] = or <8 x i16> [[TMP17]], [[TMP16]] |
1585 | // CHECK: store volatile <8 x i16> [[OR8]], <8 x i16>* @us, align 8 |
1586 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1587 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1588 | // CHECK: [[OR9:%.*]] = or <8 x i16> [[TMP19]], [[TMP18]] |
1589 | // CHECK: store volatile <8 x i16> [[OR9]], <8 x i16>* @bs, align 8 |
1590 | // CHECK: [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1591 | // CHECK: [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1592 | // CHECK: [[OR10:%.*]] = or <4 x i32> [[TMP21]], [[TMP20]] |
1593 | // CHECK: store volatile <4 x i32> [[OR10]], <4 x i32>* @si, align 8 |
1594 | // CHECK: [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1595 | // CHECK: [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1596 | // CHECK: [[OR11:%.*]] = or <4 x i32> [[TMP23]], [[TMP22]] |
1597 | // CHECK: store volatile <4 x i32> [[OR11]], <4 x i32>* @si, align 8 |
1598 | // CHECK: [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1599 | // CHECK: [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1600 | // CHECK: [[OR12:%.*]] = or <4 x i32> [[TMP25]], [[TMP24]] |
1601 | // CHECK: store volatile <4 x i32> [[OR12]], <4 x i32>* @ui, align 8 |
1602 | // CHECK: [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1603 | // CHECK: [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1604 | // CHECK: [[OR13:%.*]] = or <4 x i32> [[TMP27]], [[TMP26]] |
1605 | // CHECK: store volatile <4 x i32> [[OR13]], <4 x i32>* @ui, align 8 |
1606 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1607 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1608 | // CHECK: [[OR14:%.*]] = or <4 x i32> [[TMP29]], [[TMP28]] |
1609 | // CHECK: store volatile <4 x i32> [[OR14]], <4 x i32>* @bi, align 8 |
1610 | // CHECK: [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1611 | // CHECK: [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1612 | // CHECK: [[OR15:%.*]] = or <2 x i64> [[TMP31]], [[TMP30]] |
1613 | // CHECK: store volatile <2 x i64> [[OR15]], <2 x i64>* @sl, align 8 |
1614 | // CHECK: [[TMP32:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1615 | // CHECK: [[TMP33:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1616 | // CHECK: [[OR16:%.*]] = or <2 x i64> [[TMP33]], [[TMP32]] |
1617 | // CHECK: store volatile <2 x i64> [[OR16]], <2 x i64>* @sl, align 8 |
1618 | // CHECK: [[TMP34:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1619 | // CHECK: [[TMP35:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1620 | // CHECK: [[OR17:%.*]] = or <2 x i64> [[TMP35]], [[TMP34]] |
1621 | // CHECK: store volatile <2 x i64> [[OR17]], <2 x i64>* @ul, align 8 |
1622 | // CHECK: [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1623 | // CHECK: [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1624 | // CHECK: [[OR18:%.*]] = or <2 x i64> [[TMP37]], [[TMP36]] |
1625 | // CHECK: store volatile <2 x i64> [[OR18]], <2 x i64>* @ul, align 8 |
1626 | // CHECK: [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1627 | // CHECK: [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1628 | // CHECK: [[OR19:%.*]] = or <2 x i64> [[TMP39]], [[TMP38]] |
1629 | // CHECK: store volatile <2 x i64> [[OR19]], <2 x i64>* @bl, align 8 |
1630 | // CHECK: ret void |
1631 | void test_or_assign(void) { |
1632 | |
1633 | sc |= sc2; |
1634 | sc |= bc2; |
1635 | uc |= uc2; |
1636 | uc |= bc2; |
1637 | bc |= bc2; |
1638 | |
1639 | ss |= ss2; |
1640 | ss |= bs2; |
1641 | us |= us2; |
1642 | us |= bs2; |
1643 | bs |= bs2; |
1644 | |
1645 | si |= si2; |
1646 | si |= bi2; |
1647 | ui |= ui2; |
1648 | ui |= bi2; |
1649 | bi |= bi2; |
1650 | |
1651 | sl |= sl2; |
1652 | sl |= bl2; |
1653 | ul |= ul2; |
1654 | ul |= bl2; |
1655 | bl |= bl2; |
1656 | } |
1657 | |
1658 | // CHECK-LABEL: define void @test_xor() #0 { |
1659 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1660 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1661 | // CHECK: [[XOR:%.*]] = xor <16 x i8> [[TMP0]], [[TMP1]] |
1662 | // CHECK: store volatile <16 x i8> [[XOR]], <16 x i8>* @sc, align 8 |
1663 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1664 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1665 | // CHECK: [[XOR1:%.*]] = xor <16 x i8> [[TMP2]], [[TMP3]] |
1666 | // CHECK: store volatile <16 x i8> [[XOR1]], <16 x i8>* @sc, align 8 |
1667 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1668 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1669 | // CHECK: [[XOR2:%.*]] = xor <16 x i8> [[TMP4]], [[TMP5]] |
1670 | // CHECK: store volatile <16 x i8> [[XOR2]], <16 x i8>* @sc, align 8 |
1671 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1672 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1673 | // CHECK: [[XOR3:%.*]] = xor <16 x i8> [[TMP6]], [[TMP7]] |
1674 | // CHECK: store volatile <16 x i8> [[XOR3]], <16 x i8>* @uc, align 8 |
1675 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1676 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1677 | // CHECK: [[XOR4:%.*]] = xor <16 x i8> [[TMP8]], [[TMP9]] |
1678 | // CHECK: store volatile <16 x i8> [[XOR4]], <16 x i8>* @uc, align 8 |
1679 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1680 | // CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1681 | // CHECK: [[XOR5:%.*]] = xor <16 x i8> [[TMP10]], [[TMP11]] |
1682 | // CHECK: store volatile <16 x i8> [[XOR5]], <16 x i8>* @uc, align 8 |
1683 | // CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1684 | // CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1685 | // CHECK: [[XOR6:%.*]] = xor <16 x i8> [[TMP12]], [[TMP13]] |
1686 | // CHECK: store volatile <16 x i8> [[XOR6]], <16 x i8>* @bc, align 8 |
1687 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1688 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1689 | // CHECK: [[XOR7:%.*]] = xor <8 x i16> [[TMP14]], [[TMP15]] |
1690 | // CHECK: store volatile <8 x i16> [[XOR7]], <8 x i16>* @ss, align 8 |
1691 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1692 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1693 | // CHECK: [[XOR8:%.*]] = xor <8 x i16> [[TMP16]], [[TMP17]] |
1694 | // CHECK: store volatile <8 x i16> [[XOR8]], <8 x i16>* @ss, align 8 |
1695 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1696 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1697 | // CHECK: [[XOR9:%.*]] = xor <8 x i16> [[TMP18]], [[TMP19]] |
1698 | // CHECK: store volatile <8 x i16> [[XOR9]], <8 x i16>* @ss, align 8 |
1699 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1700 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1701 | // CHECK: [[XOR10:%.*]] = xor <8 x i16> [[TMP20]], [[TMP21]] |
1702 | // CHECK: store volatile <8 x i16> [[XOR10]], <8 x i16>* @us, align 8 |
1703 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1704 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1705 | // CHECK: [[XOR11:%.*]] = xor <8 x i16> [[TMP22]], [[TMP23]] |
1706 | // CHECK: store volatile <8 x i16> [[XOR11]], <8 x i16>* @us, align 8 |
1707 | // CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1708 | // CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1709 | // CHECK: [[XOR12:%.*]] = xor <8 x i16> [[TMP24]], [[TMP25]] |
1710 | // CHECK: store volatile <8 x i16> [[XOR12]], <8 x i16>* @us, align 8 |
1711 | // CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1712 | // CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1713 | // CHECK: [[XOR13:%.*]] = xor <8 x i16> [[TMP26]], [[TMP27]] |
1714 | // CHECK: store volatile <8 x i16> [[XOR13]], <8 x i16>* @bs, align 8 |
1715 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1716 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1717 | // CHECK: [[XOR14:%.*]] = xor <4 x i32> [[TMP28]], [[TMP29]] |
1718 | // CHECK: store volatile <4 x i32> [[XOR14]], <4 x i32>* @si, align 8 |
1719 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1720 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1721 | // CHECK: [[XOR15:%.*]] = xor <4 x i32> [[TMP30]], [[TMP31]] |
1722 | // CHECK: store volatile <4 x i32> [[XOR15]], <4 x i32>* @si, align 8 |
1723 | // CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1724 | // CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1725 | // CHECK: [[XOR16:%.*]] = xor <4 x i32> [[TMP32]], [[TMP33]] |
1726 | // CHECK: store volatile <4 x i32> [[XOR16]], <4 x i32>* @si, align 8 |
1727 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1728 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1729 | // CHECK: [[XOR17:%.*]] = xor <4 x i32> [[TMP34]], [[TMP35]] |
1730 | // CHECK: store volatile <4 x i32> [[XOR17]], <4 x i32>* @ui, align 8 |
1731 | // CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1732 | // CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1733 | // CHECK: [[XOR18:%.*]] = xor <4 x i32> [[TMP36]], [[TMP37]] |
1734 | // CHECK: store volatile <4 x i32> [[XOR18]], <4 x i32>* @ui, align 8 |
1735 | // CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1736 | // CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1737 | // CHECK: [[XOR19:%.*]] = xor <4 x i32> [[TMP38]], [[TMP39]] |
1738 | // CHECK: store volatile <4 x i32> [[XOR19]], <4 x i32>* @ui, align 8 |
1739 | // CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1740 | // CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1741 | // CHECK: [[XOR20:%.*]] = xor <4 x i32> [[TMP40]], [[TMP41]] |
1742 | // CHECK: store volatile <4 x i32> [[XOR20]], <4 x i32>* @bi, align 8 |
1743 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1744 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1745 | // CHECK: [[XOR21:%.*]] = xor <2 x i64> [[TMP42]], [[TMP43]] |
1746 | // CHECK: store volatile <2 x i64> [[XOR21]], <2 x i64>* @sl, align 8 |
1747 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1748 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1749 | // CHECK: [[XOR22:%.*]] = xor <2 x i64> [[TMP44]], [[TMP45]] |
1750 | // CHECK: store volatile <2 x i64> [[XOR22]], <2 x i64>* @sl, align 8 |
1751 | // CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1752 | // CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1753 | // CHECK: [[XOR23:%.*]] = xor <2 x i64> [[TMP46]], [[TMP47]] |
1754 | // CHECK: store volatile <2 x i64> [[XOR23]], <2 x i64>* @sl, align 8 |
1755 | // CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1756 | // CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1757 | // CHECK: [[XOR24:%.*]] = xor <2 x i64> [[TMP48]], [[TMP49]] |
1758 | // CHECK: store volatile <2 x i64> [[XOR24]], <2 x i64>* @ul, align 8 |
1759 | // CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1760 | // CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1761 | // CHECK: [[XOR25:%.*]] = xor <2 x i64> [[TMP50]], [[TMP51]] |
1762 | // CHECK: store volatile <2 x i64> [[XOR25]], <2 x i64>* @ul, align 8 |
1763 | // CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1764 | // CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1765 | // CHECK: [[XOR26:%.*]] = xor <2 x i64> [[TMP52]], [[TMP53]] |
1766 | // CHECK: store volatile <2 x i64> [[XOR26]], <2 x i64>* @ul, align 8 |
1767 | // CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1768 | // CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1769 | // CHECK: [[XOR27:%.*]] = xor <2 x i64> [[TMP54]], [[TMP55]] |
1770 | // CHECK: store volatile <2 x i64> [[XOR27]], <2 x i64>* @bl, align 8 |
1771 | // CHECK: ret void |
1772 | void test_xor(void) { |
1773 | |
1774 | sc = sc ^ sc2; |
1775 | sc = sc ^ bc2; |
1776 | sc = bc ^ sc2; |
1777 | uc = uc ^ uc2; |
1778 | uc = uc ^ bc2; |
1779 | uc = bc ^ uc2; |
1780 | bc = bc ^ bc2; |
1781 | |
1782 | ss = ss ^ ss2; |
1783 | ss = ss ^ bs2; |
1784 | ss = bs ^ ss2; |
1785 | us = us ^ us2; |
1786 | us = us ^ bs2; |
1787 | us = bs ^ us2; |
1788 | bs = bs ^ bs2; |
1789 | |
1790 | si = si ^ si2; |
1791 | si = si ^ bi2; |
1792 | si = bi ^ si2; |
1793 | ui = ui ^ ui2; |
1794 | ui = ui ^ bi2; |
1795 | ui = bi ^ ui2; |
1796 | bi = bi ^ bi2; |
1797 | |
1798 | sl = sl ^ sl2; |
1799 | sl = sl ^ bl2; |
1800 | sl = bl ^ sl2; |
1801 | ul = ul ^ ul2; |
1802 | ul = ul ^ bl2; |
1803 | ul = bl ^ ul2; |
1804 | bl = bl ^ bl2; |
1805 | } |
1806 | |
1807 | // CHECK-LABEL: define void @test_xor_assign() #0 { |
1808 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1809 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1810 | // CHECK: [[XOR:%.*]] = xor <16 x i8> [[TMP1]], [[TMP0]] |
1811 | // CHECK: store volatile <16 x i8> [[XOR]], <16 x i8>* @sc, align 8 |
1812 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1813 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1814 | // CHECK: [[XOR1:%.*]] = xor <16 x i8> [[TMP3]], [[TMP2]] |
1815 | // CHECK: store volatile <16 x i8> [[XOR1]], <16 x i8>* @sc, align 8 |
1816 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1817 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1818 | // CHECK: [[XOR2:%.*]] = xor <16 x i8> [[TMP5]], [[TMP4]] |
1819 | // CHECK: store volatile <16 x i8> [[XOR2]], <16 x i8>* @uc, align 8 |
1820 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1821 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1822 | // CHECK: [[XOR3:%.*]] = xor <16 x i8> [[TMP7]], [[TMP6]] |
1823 | // CHECK: store volatile <16 x i8> [[XOR3]], <16 x i8>* @uc, align 8 |
1824 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
1825 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
1826 | // CHECK: [[XOR4:%.*]] = xor <16 x i8> [[TMP9]], [[TMP8]] |
1827 | // CHECK: store volatile <16 x i8> [[XOR4]], <16 x i8>* @bc, align 8 |
1828 | // CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1829 | // CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1830 | // CHECK: [[XOR5:%.*]] = xor <8 x i16> [[TMP11]], [[TMP10]] |
1831 | // CHECK: store volatile <8 x i16> [[XOR5]], <8 x i16>* @ss, align 8 |
1832 | // CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1833 | // CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1834 | // CHECK: [[XOR6:%.*]] = xor <8 x i16> [[TMP13]], [[TMP12]] |
1835 | // CHECK: store volatile <8 x i16> [[XOR6]], <8 x i16>* @ss, align 8 |
1836 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1837 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1838 | // CHECK: [[XOR7:%.*]] = xor <8 x i16> [[TMP15]], [[TMP14]] |
1839 | // CHECK: store volatile <8 x i16> [[XOR7]], <8 x i16>* @us, align 8 |
1840 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1841 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1842 | // CHECK: [[XOR8:%.*]] = xor <8 x i16> [[TMP17]], [[TMP16]] |
1843 | // CHECK: store volatile <8 x i16> [[XOR8]], <8 x i16>* @us, align 8 |
1844 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
1845 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
1846 | // CHECK: [[XOR9:%.*]] = xor <8 x i16> [[TMP19]], [[TMP18]] |
1847 | // CHECK: store volatile <8 x i16> [[XOR9]], <8 x i16>* @bs, align 8 |
1848 | // CHECK: [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1849 | // CHECK: [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1850 | // CHECK: [[XOR10:%.*]] = xor <4 x i32> [[TMP21]], [[TMP20]] |
1851 | // CHECK: store volatile <4 x i32> [[XOR10]], <4 x i32>* @si, align 8 |
1852 | // CHECK: [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1853 | // CHECK: [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1854 | // CHECK: [[XOR11:%.*]] = xor <4 x i32> [[TMP23]], [[TMP22]] |
1855 | // CHECK: store volatile <4 x i32> [[XOR11]], <4 x i32>* @si, align 8 |
1856 | // CHECK: [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1857 | // CHECK: [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1858 | // CHECK: [[XOR12:%.*]] = xor <4 x i32> [[TMP25]], [[TMP24]] |
1859 | // CHECK: store volatile <4 x i32> [[XOR12]], <4 x i32>* @ui, align 8 |
1860 | // CHECK: [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1861 | // CHECK: [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
1862 | // CHECK: [[XOR13:%.*]] = xor <4 x i32> [[TMP27]], [[TMP26]] |
1863 | // CHECK: store volatile <4 x i32> [[XOR13]], <4 x i32>* @ui, align 8 |
1864 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
1865 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
1866 | // CHECK: [[XOR14:%.*]] = xor <4 x i32> [[TMP29]], [[TMP28]] |
1867 | // CHECK: store volatile <4 x i32> [[XOR14]], <4 x i32>* @bi, align 8 |
1868 | // CHECK: [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
1869 | // CHECK: [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1870 | // CHECK: [[XOR15:%.*]] = xor <2 x i64> [[TMP31]], [[TMP30]] |
1871 | // CHECK: store volatile <2 x i64> [[XOR15]], <2 x i64>* @sl, align 8 |
1872 | // CHECK: [[TMP32:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1873 | // CHECK: [[TMP33:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
1874 | // CHECK: [[XOR16:%.*]] = xor <2 x i64> [[TMP33]], [[TMP32]] |
1875 | // CHECK: store volatile <2 x i64> [[XOR16]], <2 x i64>* @sl, align 8 |
1876 | // CHECK: [[TMP34:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
1877 | // CHECK: [[TMP35:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1878 | // CHECK: [[XOR17:%.*]] = xor <2 x i64> [[TMP35]], [[TMP34]] |
1879 | // CHECK: store volatile <2 x i64> [[XOR17]], <2 x i64>* @ul, align 8 |
1880 | // CHECK: [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1881 | // CHECK: [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
1882 | // CHECK: [[XOR18:%.*]] = xor <2 x i64> [[TMP37]], [[TMP36]] |
1883 | // CHECK: store volatile <2 x i64> [[XOR18]], <2 x i64>* @ul, align 8 |
1884 | // CHECK: [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
1885 | // CHECK: [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
1886 | // CHECK: [[XOR19:%.*]] = xor <2 x i64> [[TMP39]], [[TMP38]] |
1887 | // CHECK: store volatile <2 x i64> [[XOR19]], <2 x i64>* @bl, align 8 |
1888 | // CHECK: ret void |
1889 | void test_xor_assign(void) { |
1890 | |
1891 | sc ^= sc2; |
1892 | sc ^= bc2; |
1893 | uc ^= uc2; |
1894 | uc ^= bc2; |
1895 | bc ^= bc2; |
1896 | |
1897 | ss ^= ss2; |
1898 | ss ^= bs2; |
1899 | us ^= us2; |
1900 | us ^= bs2; |
1901 | bs ^= bs2; |
1902 | |
1903 | si ^= si2; |
1904 | si ^= bi2; |
1905 | ui ^= ui2; |
1906 | ui ^= bi2; |
1907 | bi ^= bi2; |
1908 | |
1909 | sl ^= sl2; |
1910 | sl ^= bl2; |
1911 | ul ^= ul2; |
1912 | ul ^= bl2; |
1913 | bl ^= bl2; |
1914 | } |
1915 | |
1916 | // CHECK-LABEL: define void @test_sl() #0 { |
1917 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1918 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1919 | // CHECK: [[SHL:%.*]] = shl <16 x i8> [[TMP0]], [[TMP1]] |
1920 | // CHECK: store volatile <16 x i8> [[SHL]], <16 x i8>* @sc, align 8 |
1921 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1922 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1923 | // CHECK: [[SHL1:%.*]] = shl <16 x i8> [[TMP2]], [[TMP3]] |
1924 | // CHECK: store volatile <16 x i8> [[SHL1]], <16 x i8>* @sc, align 8 |
1925 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1926 | // CHECK: [[TMP5:%.*]] = load volatile i32, i32* @cnt, align 4 |
1927 | // CHECK: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP5]], i32 0 |
1928 | // CHECK: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLAT:%.*]]insert, <16 x i32> undef, <16 x i32> zeroinitializer |
1929 | // CHECK: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8> |
1930 | // CHECK: [[SHL2:%.*]] = shl <16 x i8> [[TMP4]], [[SH_PROM]] |
1931 | // CHECK: store volatile <16 x i8> [[SHL2]], <16 x i8>* @sc, align 8 |
1932 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
1933 | // CHECK: [[SHL3:%.*]] = shl <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5> |
1934 | // CHECK: store volatile <16 x i8> [[SHL3]], <16 x i8>* @sc, align 8 |
1935 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1936 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
1937 | // CHECK: [[SHL4:%.*]] = shl <16 x i8> [[TMP7]], [[TMP8]] |
1938 | // CHECK: store volatile <16 x i8> [[SHL4]], <16 x i8>* @uc, align 8 |
1939 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1940 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
1941 | // CHECK: [[SHL5:%.*]] = shl <16 x i8> [[TMP9]], [[TMP10]] |
1942 | // CHECK: store volatile <16 x i8> [[SHL5]], <16 x i8>* @uc, align 8 |
1943 | // CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1944 | // CHECK: [[TMP12:%.*]] = load volatile i32, i32* @cnt, align 4 |
1945 | // CHECK: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP12]], i32 0 |
1946 | // CHECK: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer |
1947 | // CHECK: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8> |
1948 | // CHECK: [[SHL9:%.*]] = shl <16 x i8> [[TMP11]], [[SH_PROM8]] |
1949 | // CHECK: store volatile <16 x i8> [[SHL9]], <16 x i8>* @uc, align 8 |
1950 | // CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
1951 | // CHECK: [[SHL10:%.*]] = shl <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5> |
1952 | // CHECK: store volatile <16 x i8> [[SHL10]], <16 x i8>* @uc, align 8 |
1953 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1954 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1955 | // CHECK: [[SHL11:%.*]] = shl <8 x i16> [[TMP14]], [[TMP15]] |
1956 | // CHECK: store volatile <8 x i16> [[SHL11]], <8 x i16>* @ss, align 8 |
1957 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1958 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1959 | // CHECK: [[SHL12:%.*]] = shl <8 x i16> [[TMP16]], [[TMP17]] |
1960 | // CHECK: store volatile <8 x i16> [[SHL12]], <8 x i16>* @ss, align 8 |
1961 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1962 | // CHECK: [[TMP19:%.*]] = load volatile i32, i32* @cnt, align 4 |
1963 | // CHECK: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP19]], i32 0 |
1964 | // CHECK: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer |
1965 | // CHECK: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16> |
1966 | // CHECK: [[SHL16:%.*]] = shl <8 x i16> [[TMP18]], [[SH_PROM15]] |
1967 | // CHECK: store volatile <8 x i16> [[SHL16]], <8 x i16>* @ss, align 8 |
1968 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
1969 | // CHECK: [[SHL17:%.*]] = shl <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5> |
1970 | // CHECK: store volatile <8 x i16> [[SHL17]], <8 x i16>* @ss, align 8 |
1971 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1972 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
1973 | // CHECK: [[SHL18:%.*]] = shl <8 x i16> [[TMP21]], [[TMP22]] |
1974 | // CHECK: store volatile <8 x i16> [[SHL18]], <8 x i16>* @us, align 8 |
1975 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1976 | // CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
1977 | // CHECK: [[SHL19:%.*]] = shl <8 x i16> [[TMP23]], [[TMP24]] |
1978 | // CHECK: store volatile <8 x i16> [[SHL19]], <8 x i16>* @us, align 8 |
1979 | // CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1980 | // CHECK: [[TMP26:%.*]] = load volatile i32, i32* @cnt, align 4 |
1981 | // CHECK: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP26]], i32 0 |
1982 | // CHECK: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer |
1983 | // CHECK: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16> |
1984 | // CHECK: [[SHL23:%.*]] = shl <8 x i16> [[TMP25]], [[SH_PROM22]] |
1985 | // CHECK: store volatile <8 x i16> [[SHL23]], <8 x i16>* @us, align 8 |
1986 | // CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
1987 | // CHECK: [[SHL24:%.*]] = shl <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5> |
1988 | // CHECK: store volatile <8 x i16> [[SHL24]], <8 x i16>* @us, align 8 |
1989 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1990 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
1991 | // CHECK: [[SHL25:%.*]] = shl <4 x i32> [[TMP28]], [[TMP29]] |
1992 | // CHECK: store volatile <4 x i32> [[SHL25]], <4 x i32>* @si, align 8 |
1993 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1994 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
1995 | // CHECK: [[SHL26:%.*]] = shl <4 x i32> [[TMP30]], [[TMP31]] |
1996 | // CHECK: store volatile <4 x i32> [[SHL26]], <4 x i32>* @si, align 8 |
1997 | // CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
1998 | // CHECK: [[TMP33:%.*]] = load volatile i32, i32* @cnt, align 4 |
1999 | // CHECK: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP33]], i32 0 |
2000 | // CHECK: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer |
2001 | // CHECK: [[SHL29:%.*]] = shl <4 x i32> [[TMP32]], [[SPLAT_SPLAT28]] |
2002 | // CHECK: store volatile <4 x i32> [[SHL29]], <4 x i32>* @si, align 8 |
2003 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2004 | // CHECK: [[SHL30:%.*]] = shl <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5> |
2005 | // CHECK: store volatile <4 x i32> [[SHL30]], <4 x i32>* @si, align 8 |
2006 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2007 | // CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2008 | // CHECK: [[SHL31:%.*]] = shl <4 x i32> [[TMP35]], [[TMP36]] |
2009 | // CHECK: store volatile <4 x i32> [[SHL31]], <4 x i32>* @ui, align 8 |
2010 | // CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2011 | // CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2012 | // CHECK: [[SHL32:%.*]] = shl <4 x i32> [[TMP37]], [[TMP38]] |
2013 | // CHECK: store volatile <4 x i32> [[SHL32]], <4 x i32>* @ui, align 8 |
2014 | // CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2015 | // CHECK: [[TMP40:%.*]] = load volatile i32, i32* @cnt, align 4 |
2016 | // CHECK: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP40]], i32 0 |
2017 | // CHECK: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer |
2018 | // CHECK: [[SHL35:%.*]] = shl <4 x i32> [[TMP39]], [[SPLAT_SPLAT34]] |
2019 | // CHECK: store volatile <4 x i32> [[SHL35]], <4 x i32>* @ui, align 8 |
2020 | // CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2021 | // CHECK: [[SHL36:%.*]] = shl <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5> |
2022 | // CHECK: store volatile <4 x i32> [[SHL36]], <4 x i32>* @ui, align 8 |
2023 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2024 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2025 | // CHECK: [[SHL37:%.*]] = shl <2 x i64> [[TMP42]], [[TMP43]] |
2026 | // CHECK: store volatile <2 x i64> [[SHL37]], <2 x i64>* @sl, align 8 |
2027 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2028 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2029 | // CHECK: [[SHL38:%.*]] = shl <2 x i64> [[TMP44]], [[TMP45]] |
2030 | // CHECK: store volatile <2 x i64> [[SHL38]], <2 x i64>* @sl, align 8 |
2031 | // CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2032 | // CHECK: [[TMP47:%.*]] = load volatile i32, i32* @cnt, align 4 |
2033 | // CHECK: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP47]], i32 0 |
2034 | // CHECK: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer |
2035 | // CHECK: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64> |
2036 | // CHECK: [[SHL42:%.*]] = shl <2 x i64> [[TMP46]], [[SH_PROM41]] |
2037 | // CHECK: store volatile <2 x i64> [[SHL42]], <2 x i64>* @sl, align 8 |
2038 | // CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2039 | // CHECK: [[SHL43:%.*]] = shl <2 x i64> [[TMP48]], <i64 5, i64 5> |
2040 | // CHECK: store volatile <2 x i64> [[SHL43]], <2 x i64>* @sl, align 8 |
2041 | // CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2042 | // CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2043 | // CHECK: [[SHL44:%.*]] = shl <2 x i64> [[TMP49]], [[TMP50]] |
2044 | // CHECK: store volatile <2 x i64> [[SHL44]], <2 x i64>* @ul, align 8 |
2045 | // CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2046 | // CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2047 | // CHECK: [[SHL45:%.*]] = shl <2 x i64> [[TMP51]], [[TMP52]] |
2048 | // CHECK: store volatile <2 x i64> [[SHL45]], <2 x i64>* @ul, align 8 |
2049 | // CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2050 | // CHECK: [[TMP54:%.*]] = load volatile i32, i32* @cnt, align 4 |
2051 | // CHECK: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP54]], i32 0 |
2052 | // CHECK: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer |
2053 | // CHECK: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64> |
2054 | // CHECK: [[SHL49:%.*]] = shl <2 x i64> [[TMP53]], [[SH_PROM48]] |
2055 | // CHECK: store volatile <2 x i64> [[SHL49]], <2 x i64>* @ul, align 8 |
2056 | // CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2057 | // CHECK: [[SHL50:%.*]] = shl <2 x i64> [[TMP55]], <i64 5, i64 5> |
2058 | // CHECK: store volatile <2 x i64> [[SHL50]], <2 x i64>* @ul, align 8 |
2059 | // CHECK: ret void |
2060 | void test_sl(void) { |
2061 | |
2062 | sc = sc << sc2; |
2063 | sc = sc << uc2; |
2064 | sc = sc << cnt; |
2065 | sc = sc << 5; |
2066 | uc = uc << sc2; |
2067 | uc = uc << uc2; |
2068 | uc = uc << cnt; |
2069 | uc = uc << 5; |
2070 | |
2071 | ss = ss << ss2; |
2072 | ss = ss << us2; |
2073 | ss = ss << cnt; |
2074 | ss = ss << 5; |
2075 | us = us << ss2; |
2076 | us = us << us2; |
2077 | us = us << cnt; |
2078 | us = us << 5; |
2079 | |
2080 | si = si << si2; |
2081 | si = si << ui2; |
2082 | si = si << cnt; |
2083 | si = si << 5; |
2084 | ui = ui << si2; |
2085 | ui = ui << ui2; |
2086 | ui = ui << cnt; |
2087 | ui = ui << 5; |
2088 | |
2089 | sl = sl << sl2; |
2090 | sl = sl << ul2; |
2091 | sl = sl << cnt; |
2092 | sl = sl << 5; |
2093 | ul = ul << sl2; |
2094 | ul = ul << ul2; |
2095 | ul = ul << cnt; |
2096 | ul = ul << 5; |
2097 | } |
2098 | |
2099 | // CHECK-LABEL: define void @test_sl_assign() #0 { |
2100 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
2101 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2102 | // CHECK: [[SHL:%.*]] = shl <16 x i8> [[TMP1]], [[TMP0]] |
2103 | // CHECK: store volatile <16 x i8> [[SHL]], <16 x i8>* @sc, align 8 |
2104 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
2105 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2106 | // CHECK: [[SHL1:%.*]] = shl <16 x i8> [[TMP3]], [[TMP2]] |
2107 | // CHECK: store volatile <16 x i8> [[SHL1]], <16 x i8>* @sc, align 8 |
2108 | // CHECK: [[TMP4:%.*]] = load volatile i32, i32* @cnt, align 4 |
2109 | // CHECK: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP4]], i32 0 |
2110 | // CHECK: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLAT:%.*]]insert, <16 x i32> undef, <16 x i32> zeroinitializer |
2111 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2112 | // CHECK: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8> |
2113 | // CHECK: [[SHL2:%.*]] = shl <16 x i8> [[TMP5]], [[SH_PROM]] |
2114 | // CHECK: store volatile <16 x i8> [[SHL2]], <16 x i8>* @sc, align 8 |
2115 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2116 | // CHECK: [[SHL3:%.*]] = shl <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5> |
2117 | // CHECK: store volatile <16 x i8> [[SHL3]], <16 x i8>* @sc, align 8 |
2118 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
2119 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2120 | // CHECK: [[SHL4:%.*]] = shl <16 x i8> [[TMP8]], [[TMP7]] |
2121 | // CHECK: store volatile <16 x i8> [[SHL4]], <16 x i8>* @uc, align 8 |
2122 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
2123 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2124 | // CHECK: [[SHL5:%.*]] = shl <16 x i8> [[TMP10]], [[TMP9]] |
2125 | // CHECK: store volatile <16 x i8> [[SHL5]], <16 x i8>* @uc, align 8 |
2126 | // CHECK: [[TMP11:%.*]] = load volatile i32, i32* @cnt, align 4 |
2127 | // CHECK: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP11]], i32 0 |
2128 | // CHECK: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer |
2129 | // CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2130 | // CHECK: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8> |
2131 | // CHECK: [[SHL9:%.*]] = shl <16 x i8> [[TMP12]], [[SH_PROM8]] |
2132 | // CHECK: store volatile <16 x i8> [[SHL9]], <16 x i8>* @uc, align 8 |
2133 | // CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2134 | // CHECK: [[SHL10:%.*]] = shl <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5> |
2135 | // CHECK: store volatile <16 x i8> [[SHL10]], <16 x i8>* @uc, align 8 |
2136 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
2137 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2138 | // CHECK: [[SHL11:%.*]] = shl <8 x i16> [[TMP15]], [[TMP14]] |
2139 | // CHECK: store volatile <8 x i16> [[SHL11]], <8 x i16>* @ss, align 8 |
2140 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
2141 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2142 | // CHECK: [[SHL12:%.*]] = shl <8 x i16> [[TMP17]], [[TMP16]] |
2143 | // CHECK: store volatile <8 x i16> [[SHL12]], <8 x i16>* @ss, align 8 |
2144 | // CHECK: [[TMP18:%.*]] = load volatile i32, i32* @cnt, align 4 |
2145 | // CHECK: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP18]], i32 0 |
2146 | // CHECK: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer |
2147 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2148 | // CHECK: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16> |
2149 | // CHECK: [[SHL16:%.*]] = shl <8 x i16> [[TMP19]], [[SH_PROM15]] |
2150 | // CHECK: store volatile <8 x i16> [[SHL16]], <8 x i16>* @ss, align 8 |
2151 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2152 | // CHECK: [[SHL17:%.*]] = shl <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5> |
2153 | // CHECK: store volatile <8 x i16> [[SHL17]], <8 x i16>* @ss, align 8 |
2154 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
2155 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2156 | // CHECK: [[SHL18:%.*]] = shl <8 x i16> [[TMP22]], [[TMP21]] |
2157 | // CHECK: store volatile <8 x i16> [[SHL18]], <8 x i16>* @us, align 8 |
2158 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
2159 | // CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2160 | // CHECK: [[SHL19:%.*]] = shl <8 x i16> [[TMP24]], [[TMP23]] |
2161 | // CHECK: store volatile <8 x i16> [[SHL19]], <8 x i16>* @us, align 8 |
2162 | // CHECK: [[TMP25:%.*]] = load volatile i32, i32* @cnt, align 4 |
2163 | // CHECK: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP25]], i32 0 |
2164 | // CHECK: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer |
2165 | // CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2166 | // CHECK: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16> |
2167 | // CHECK: [[SHL23:%.*]] = shl <8 x i16> [[TMP26]], [[SH_PROM22]] |
2168 | // CHECK: store volatile <8 x i16> [[SHL23]], <8 x i16>* @us, align 8 |
2169 | // CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2170 | // CHECK: [[SHL24:%.*]] = shl <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5> |
2171 | // CHECK: store volatile <8 x i16> [[SHL24]], <8 x i16>* @us, align 8 |
2172 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2173 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2174 | // CHECK: [[SHL25:%.*]] = shl <4 x i32> [[TMP29]], [[TMP28]] |
2175 | // CHECK: store volatile <4 x i32> [[SHL25]], <4 x i32>* @si, align 8 |
2176 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2177 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2178 | // CHECK: [[SHL26:%.*]] = shl <4 x i32> [[TMP31]], [[TMP30]] |
2179 | // CHECK: store volatile <4 x i32> [[SHL26]], <4 x i32>* @si, align 8 |
2180 | // CHECK: [[TMP32:%.*]] = load volatile i32, i32* @cnt, align 4 |
2181 | // CHECK: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP32]], i32 0 |
2182 | // CHECK: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer |
2183 | // CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2184 | // CHECK: [[SHL29:%.*]] = shl <4 x i32> [[TMP33]], [[SPLAT_SPLAT28]] |
2185 | // CHECK: store volatile <4 x i32> [[SHL29]], <4 x i32>* @si, align 8 |
2186 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2187 | // CHECK: [[SHL30:%.*]] = shl <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5> |
2188 | // CHECK: store volatile <4 x i32> [[SHL30]], <4 x i32>* @si, align 8 |
2189 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2190 | // CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2191 | // CHECK: [[SHL31:%.*]] = shl <4 x i32> [[TMP36]], [[TMP35]] |
2192 | // CHECK: store volatile <4 x i32> [[SHL31]], <4 x i32>* @ui, align 8 |
2193 | // CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2194 | // CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2195 | // CHECK: [[SHL32:%.*]] = shl <4 x i32> [[TMP38]], [[TMP37]] |
2196 | // CHECK: store volatile <4 x i32> [[SHL32]], <4 x i32>* @ui, align 8 |
2197 | // CHECK: [[TMP39:%.*]] = load volatile i32, i32* @cnt, align 4 |
2198 | // CHECK: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP39]], i32 0 |
2199 | // CHECK: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer |
2200 | // CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2201 | // CHECK: [[SHL35:%.*]] = shl <4 x i32> [[TMP40]], [[SPLAT_SPLAT34]] |
2202 | // CHECK: store volatile <4 x i32> [[SHL35]], <4 x i32>* @ui, align 8 |
2203 | // CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2204 | // CHECK: [[SHL36:%.*]] = shl <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5> |
2205 | // CHECK: store volatile <4 x i32> [[SHL36]], <4 x i32>* @ui, align 8 |
2206 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2207 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2208 | // CHECK: [[SHL37:%.*]] = shl <2 x i64> [[TMP43]], [[TMP42]] |
2209 | // CHECK: store volatile <2 x i64> [[SHL37]], <2 x i64>* @sl, align 8 |
2210 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2211 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2212 | // CHECK: [[SHL38:%.*]] = shl <2 x i64> [[TMP45]], [[TMP44]] |
2213 | // CHECK: store volatile <2 x i64> [[SHL38]], <2 x i64>* @sl, align 8 |
2214 | // CHECK: [[TMP46:%.*]] = load volatile i32, i32* @cnt, align 4 |
2215 | // CHECK: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP46]], i32 0 |
2216 | // CHECK: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer |
2217 | // CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2218 | // CHECK: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64> |
2219 | // CHECK: [[SHL42:%.*]] = shl <2 x i64> [[TMP47]], [[SH_PROM41]] |
2220 | // CHECK: store volatile <2 x i64> [[SHL42]], <2 x i64>* @sl, align 8 |
2221 | // CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2222 | // CHECK: [[SHL43:%.*]] = shl <2 x i64> [[TMP48]], <i64 5, i64 5> |
2223 | // CHECK: store volatile <2 x i64> [[SHL43]], <2 x i64>* @sl, align 8 |
2224 | // CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2225 | // CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2226 | // CHECK: [[SHL44:%.*]] = shl <2 x i64> [[TMP50]], [[TMP49]] |
2227 | // CHECK: store volatile <2 x i64> [[SHL44]], <2 x i64>* @ul, align 8 |
2228 | // CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2229 | // CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2230 | // CHECK: [[SHL45:%.*]] = shl <2 x i64> [[TMP52]], [[TMP51]] |
2231 | // CHECK: store volatile <2 x i64> [[SHL45]], <2 x i64>* @ul, align 8 |
2232 | // CHECK: [[TMP53:%.*]] = load volatile i32, i32* @cnt, align 4 |
2233 | // CHECK: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP53]], i32 0 |
2234 | // CHECK: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer |
2235 | // CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2236 | // CHECK: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64> |
2237 | // CHECK: [[SHL49:%.*]] = shl <2 x i64> [[TMP54]], [[SH_PROM48]] |
2238 | // CHECK: store volatile <2 x i64> [[SHL49]], <2 x i64>* @ul, align 8 |
2239 | // CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2240 | // CHECK: [[SHL50:%.*]] = shl <2 x i64> [[TMP55]], <i64 5, i64 5> |
2241 | // CHECK: store volatile <2 x i64> [[SHL50]], <2 x i64>* @ul, align 8 |
2242 | // CHECK: ret void |
2243 | void test_sl_assign(void) { |
2244 | |
2245 | sc <<= sc2; |
2246 | sc <<= uc2; |
2247 | sc <<= cnt; |
2248 | sc <<= 5; |
2249 | uc <<= sc2; |
2250 | uc <<= uc2; |
2251 | uc <<= cnt; |
2252 | uc <<= 5; |
2253 | |
2254 | ss <<= ss2; |
2255 | ss <<= us2; |
2256 | ss <<= cnt; |
2257 | ss <<= 5; |
2258 | us <<= ss2; |
2259 | us <<= us2; |
2260 | us <<= cnt; |
2261 | us <<= 5; |
2262 | |
2263 | si <<= si2; |
2264 | si <<= ui2; |
2265 | si <<= cnt; |
2266 | si <<= 5; |
2267 | ui <<= si2; |
2268 | ui <<= ui2; |
2269 | ui <<= cnt; |
2270 | ui <<= 5; |
2271 | |
2272 | sl <<= sl2; |
2273 | sl <<= ul2; |
2274 | sl <<= cnt; |
2275 | sl <<= 5; |
2276 | ul <<= sl2; |
2277 | ul <<= ul2; |
2278 | ul <<= cnt; |
2279 | ul <<= 5; |
2280 | } |
2281 | |
2282 | // CHECK-LABEL: define void @test_sr() #0 { |
2283 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2284 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
2285 | // CHECK: [[SHR:%.*]] = ashr <16 x i8> [[TMP0]], [[TMP1]] |
2286 | // CHECK: store volatile <16 x i8> [[SHR]], <16 x i8>* @sc, align 8 |
2287 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2288 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
2289 | // CHECK: [[SHR1:%.*]] = ashr <16 x i8> [[TMP2]], [[TMP3]] |
2290 | // CHECK: store volatile <16 x i8> [[SHR1]], <16 x i8>* @sc, align 8 |
2291 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2292 | // CHECK: [[TMP5:%.*]] = load volatile i32, i32* @cnt, align 4 |
2293 | // CHECK: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP5]], i32 0 |
2294 | // CHECK: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLAT:%.*]]insert, <16 x i32> undef, <16 x i32> zeroinitializer |
2295 | // CHECK: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8> |
2296 | // CHECK: [[SHR2:%.*]] = ashr <16 x i8> [[TMP4]], [[SH_PROM]] |
2297 | // CHECK: store volatile <16 x i8> [[SHR2]], <16 x i8>* @sc, align 8 |
2298 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2299 | // CHECK: [[SHR3:%.*]] = ashr <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5> |
2300 | // CHECK: store volatile <16 x i8> [[SHR3]], <16 x i8>* @sc, align 8 |
2301 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2302 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
2303 | // CHECK: [[SHR4:%.*]] = lshr <16 x i8> [[TMP7]], [[TMP8]] |
2304 | // CHECK: store volatile <16 x i8> [[SHR4]], <16 x i8>* @uc, align 8 |
2305 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2306 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
2307 | // CHECK: [[SHR5:%.*]] = lshr <16 x i8> [[TMP9]], [[TMP10]] |
2308 | // CHECK: store volatile <16 x i8> [[SHR5]], <16 x i8>* @uc, align 8 |
2309 | // CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2310 | // CHECK: [[TMP12:%.*]] = load volatile i32, i32* @cnt, align 4 |
2311 | // CHECK: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP12]], i32 0 |
2312 | // CHECK: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer |
2313 | // CHECK: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8> |
2314 | // CHECK: [[SHR9:%.*]] = lshr <16 x i8> [[TMP11]], [[SH_PROM8]] |
2315 | // CHECK: store volatile <16 x i8> [[SHR9]], <16 x i8>* @uc, align 8 |
2316 | // CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2317 | // CHECK: [[SHR10:%.*]] = lshr <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5> |
2318 | // CHECK: store volatile <16 x i8> [[SHR10]], <16 x i8>* @uc, align 8 |
2319 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2320 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
2321 | // CHECK: [[SHR11:%.*]] = ashr <8 x i16> [[TMP14]], [[TMP15]] |
2322 | // CHECK: store volatile <8 x i16> [[SHR11]], <8 x i16>* @ss, align 8 |
2323 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2324 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
2325 | // CHECK: [[SHR12:%.*]] = ashr <8 x i16> [[TMP16]], [[TMP17]] |
2326 | // CHECK: store volatile <8 x i16> [[SHR12]], <8 x i16>* @ss, align 8 |
2327 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2328 | // CHECK: [[TMP19:%.*]] = load volatile i32, i32* @cnt, align 4 |
2329 | // CHECK: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP19]], i32 0 |
2330 | // CHECK: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer |
2331 | // CHECK: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16> |
2332 | // CHECK: [[SHR16:%.*]] = ashr <8 x i16> [[TMP18]], [[SH_PROM15]] |
2333 | // CHECK: store volatile <8 x i16> [[SHR16]], <8 x i16>* @ss, align 8 |
2334 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2335 | // CHECK: [[SHR17:%.*]] = ashr <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5> |
2336 | // CHECK: store volatile <8 x i16> [[SHR17]], <8 x i16>* @ss, align 8 |
2337 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2338 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
2339 | // CHECK: [[SHR18:%.*]] = lshr <8 x i16> [[TMP21]], [[TMP22]] |
2340 | // CHECK: store volatile <8 x i16> [[SHR18]], <8 x i16>* @us, align 8 |
2341 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2342 | // CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
2343 | // CHECK: [[SHR19:%.*]] = lshr <8 x i16> [[TMP23]], [[TMP24]] |
2344 | // CHECK: store volatile <8 x i16> [[SHR19]], <8 x i16>* @us, align 8 |
2345 | // CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2346 | // CHECK: [[TMP26:%.*]] = load volatile i32, i32* @cnt, align 4 |
2347 | // CHECK: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP26]], i32 0 |
2348 | // CHECK: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer |
2349 | // CHECK: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16> |
2350 | // CHECK: [[SHR23:%.*]] = lshr <8 x i16> [[TMP25]], [[SH_PROM22]] |
2351 | // CHECK: store volatile <8 x i16> [[SHR23]], <8 x i16>* @us, align 8 |
2352 | // CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2353 | // CHECK: [[SHR24:%.*]] = lshr <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5> |
2354 | // CHECK: store volatile <8 x i16> [[SHR24]], <8 x i16>* @us, align 8 |
2355 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2356 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2357 | // CHECK: [[SHR25:%.*]] = ashr <4 x i32> [[TMP28]], [[TMP29]] |
2358 | // CHECK: store volatile <4 x i32> [[SHR25]], <4 x i32>* @si, align 8 |
2359 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2360 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2361 | // CHECK: [[SHR26:%.*]] = ashr <4 x i32> [[TMP30]], [[TMP31]] |
2362 | // CHECK: store volatile <4 x i32> [[SHR26]], <4 x i32>* @si, align 8 |
2363 | // CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2364 | // CHECK: [[TMP33:%.*]] = load volatile i32, i32* @cnt, align 4 |
2365 | // CHECK: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP33]], i32 0 |
2366 | // CHECK: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer |
2367 | // CHECK: [[SHR29:%.*]] = ashr <4 x i32> [[TMP32]], [[SPLAT_SPLAT28]] |
2368 | // CHECK: store volatile <4 x i32> [[SHR29]], <4 x i32>* @si, align 8 |
2369 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2370 | // CHECK: [[SHR30:%.*]] = ashr <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5> |
2371 | // CHECK: store volatile <4 x i32> [[SHR30]], <4 x i32>* @si, align 8 |
2372 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2373 | // CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2374 | // CHECK: [[SHR31:%.*]] = lshr <4 x i32> [[TMP35]], [[TMP36]] |
2375 | // CHECK: store volatile <4 x i32> [[SHR31]], <4 x i32>* @ui, align 8 |
2376 | // CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2377 | // CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2378 | // CHECK: [[SHR32:%.*]] = lshr <4 x i32> [[TMP37]], [[TMP38]] |
2379 | // CHECK: store volatile <4 x i32> [[SHR32]], <4 x i32>* @ui, align 8 |
2380 | // CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2381 | // CHECK: [[TMP40:%.*]] = load volatile i32, i32* @cnt, align 4 |
2382 | // CHECK: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP40]], i32 0 |
2383 | // CHECK: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer |
2384 | // CHECK: [[SHR35:%.*]] = lshr <4 x i32> [[TMP39]], [[SPLAT_SPLAT34]] |
2385 | // CHECK: store volatile <4 x i32> [[SHR35]], <4 x i32>* @ui, align 8 |
2386 | // CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2387 | // CHECK: [[SHR36:%.*]] = lshr <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5> |
2388 | // CHECK: store volatile <4 x i32> [[SHR36]], <4 x i32>* @ui, align 8 |
2389 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2390 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2391 | // CHECK: [[SHR37:%.*]] = ashr <2 x i64> [[TMP42]], [[TMP43]] |
2392 | // CHECK: store volatile <2 x i64> [[SHR37]], <2 x i64>* @sl, align 8 |
2393 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2394 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2395 | // CHECK: [[SHR38:%.*]] = ashr <2 x i64> [[TMP44]], [[TMP45]] |
2396 | // CHECK: store volatile <2 x i64> [[SHR38]], <2 x i64>* @sl, align 8 |
2397 | // CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2398 | // CHECK: [[TMP47:%.*]] = load volatile i32, i32* @cnt, align 4 |
2399 | // CHECK: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP47]], i32 0 |
2400 | // CHECK: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer |
2401 | // CHECK: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64> |
2402 | // CHECK: [[SHR42:%.*]] = ashr <2 x i64> [[TMP46]], [[SH_PROM41]] |
2403 | // CHECK: store volatile <2 x i64> [[SHR42]], <2 x i64>* @sl, align 8 |
2404 | // CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2405 | // CHECK: [[SHR43:%.*]] = ashr <2 x i64> [[TMP48]], <i64 5, i64 5> |
2406 | // CHECK: store volatile <2 x i64> [[SHR43]], <2 x i64>* @sl, align 8 |
2407 | // CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2408 | // CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2409 | // CHECK: [[SHR44:%.*]] = lshr <2 x i64> [[TMP49]], [[TMP50]] |
2410 | // CHECK: store volatile <2 x i64> [[SHR44]], <2 x i64>* @ul, align 8 |
2411 | // CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2412 | // CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2413 | // CHECK: [[SHR45:%.*]] = lshr <2 x i64> [[TMP51]], [[TMP52]] |
2414 | // CHECK: store volatile <2 x i64> [[SHR45]], <2 x i64>* @ul, align 8 |
2415 | // CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2416 | // CHECK: [[TMP54:%.*]] = load volatile i32, i32* @cnt, align 4 |
2417 | // CHECK: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP54]], i32 0 |
2418 | // CHECK: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer |
2419 | // CHECK: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64> |
2420 | // CHECK: [[SHR49:%.*]] = lshr <2 x i64> [[TMP53]], [[SH_PROM48]] |
2421 | // CHECK: store volatile <2 x i64> [[SHR49]], <2 x i64>* @ul, align 8 |
2422 | // CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2423 | // CHECK: [[SHR50:%.*]] = lshr <2 x i64> [[TMP55]], <i64 5, i64 5> |
2424 | // CHECK: store volatile <2 x i64> [[SHR50]], <2 x i64>* @ul, align 8 |
2425 | // CHECK: ret void |
2426 | void test_sr(void) { |
2427 | |
2428 | sc = sc >> sc2; |
2429 | sc = sc >> uc2; |
2430 | sc = sc >> cnt; |
2431 | sc = sc >> 5; |
2432 | uc = uc >> sc2; |
2433 | uc = uc >> uc2; |
2434 | uc = uc >> cnt; |
2435 | uc = uc >> 5; |
2436 | |
2437 | ss = ss >> ss2; |
2438 | ss = ss >> us2; |
2439 | ss = ss >> cnt; |
2440 | ss = ss >> 5; |
2441 | us = us >> ss2; |
2442 | us = us >> us2; |
2443 | us = us >> cnt; |
2444 | us = us >> 5; |
2445 | |
2446 | si = si >> si2; |
2447 | si = si >> ui2; |
2448 | si = si >> cnt; |
2449 | si = si >> 5; |
2450 | ui = ui >> si2; |
2451 | ui = ui >> ui2; |
2452 | ui = ui >> cnt; |
2453 | ui = ui >> 5; |
2454 | |
2455 | sl = sl >> sl2; |
2456 | sl = sl >> ul2; |
2457 | sl = sl >> cnt; |
2458 | sl = sl >> 5; |
2459 | ul = ul >> sl2; |
2460 | ul = ul >> ul2; |
2461 | ul = ul >> cnt; |
2462 | ul = ul >> 5; |
2463 | } |
2464 | |
2465 | // CHECK-LABEL: define void @test_sr_assign() #0 { |
2466 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
2467 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2468 | // CHECK: [[SHR:%.*]] = ashr <16 x i8> [[TMP1]], [[TMP0]] |
2469 | // CHECK: store volatile <16 x i8> [[SHR]], <16 x i8>* @sc, align 8 |
2470 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
2471 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2472 | // CHECK: [[SHR1:%.*]] = ashr <16 x i8> [[TMP3]], [[TMP2]] |
2473 | // CHECK: store volatile <16 x i8> [[SHR1]], <16 x i8>* @sc, align 8 |
2474 | // CHECK: [[TMP4:%.*]] = load volatile i32, i32* @cnt, align 4 |
2475 | // CHECK: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP4]], i32 0 |
2476 | // CHECK: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLAT:%.*]]insert, <16 x i32> undef, <16 x i32> zeroinitializer |
2477 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2478 | // CHECK: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8> |
2479 | // CHECK: [[SHR2:%.*]] = ashr <16 x i8> [[TMP5]], [[SH_PROM]] |
2480 | // CHECK: store volatile <16 x i8> [[SHR2]], <16 x i8>* @sc, align 8 |
2481 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2482 | // CHECK: [[SHR3:%.*]] = ashr <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5> |
2483 | // CHECK: store volatile <16 x i8> [[SHR3]], <16 x i8>* @sc, align 8 |
2484 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
2485 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2486 | // CHECK: [[SHR4:%.*]] = lshr <16 x i8> [[TMP8]], [[TMP7]] |
2487 | // CHECK: store volatile <16 x i8> [[SHR4]], <16 x i8>* @uc, align 8 |
2488 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
2489 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2490 | // CHECK: [[SHR5:%.*]] = lshr <16 x i8> [[TMP10]], [[TMP9]] |
2491 | // CHECK: store volatile <16 x i8> [[SHR5]], <16 x i8>* @uc, align 8 |
2492 | // CHECK: [[TMP11:%.*]] = load volatile i32, i32* @cnt, align 4 |
2493 | // CHECK: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP11]], i32 0 |
2494 | // CHECK: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer |
2495 | // CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2496 | // CHECK: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8> |
2497 | // CHECK: [[SHR9:%.*]] = lshr <16 x i8> [[TMP12]], [[SH_PROM8]] |
2498 | // CHECK: store volatile <16 x i8> [[SHR9]], <16 x i8>* @uc, align 8 |
2499 | // CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2500 | // CHECK: [[SHR10:%.*]] = lshr <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5> |
2501 | // CHECK: store volatile <16 x i8> [[SHR10]], <16 x i8>* @uc, align 8 |
2502 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
2503 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2504 | // CHECK: [[SHR11:%.*]] = ashr <8 x i16> [[TMP15]], [[TMP14]] |
2505 | // CHECK: store volatile <8 x i16> [[SHR11]], <8 x i16>* @ss, align 8 |
2506 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
2507 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2508 | // CHECK: [[SHR12:%.*]] = ashr <8 x i16> [[TMP17]], [[TMP16]] |
2509 | // CHECK: store volatile <8 x i16> [[SHR12]], <8 x i16>* @ss, align 8 |
2510 | // CHECK: [[TMP18:%.*]] = load volatile i32, i32* @cnt, align 4 |
2511 | // CHECK: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP18]], i32 0 |
2512 | // CHECK: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer |
2513 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2514 | // CHECK: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16> |
2515 | // CHECK: [[SHR16:%.*]] = ashr <8 x i16> [[TMP19]], [[SH_PROM15]] |
2516 | // CHECK: store volatile <8 x i16> [[SHR16]], <8 x i16>* @ss, align 8 |
2517 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2518 | // CHECK: [[SHR17:%.*]] = ashr <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5> |
2519 | // CHECK: store volatile <8 x i16> [[SHR17]], <8 x i16>* @ss, align 8 |
2520 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
2521 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2522 | // CHECK: [[SHR18:%.*]] = lshr <8 x i16> [[TMP22]], [[TMP21]] |
2523 | // CHECK: store volatile <8 x i16> [[SHR18]], <8 x i16>* @us, align 8 |
2524 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
2525 | // CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2526 | // CHECK: [[SHR19:%.*]] = lshr <8 x i16> [[TMP24]], [[TMP23]] |
2527 | // CHECK: store volatile <8 x i16> [[SHR19]], <8 x i16>* @us, align 8 |
2528 | // CHECK: [[TMP25:%.*]] = load volatile i32, i32* @cnt, align 4 |
2529 | // CHECK: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP25]], i32 0 |
2530 | // CHECK: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer |
2531 | // CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2532 | // CHECK: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16> |
2533 | // CHECK: [[SHR23:%.*]] = lshr <8 x i16> [[TMP26]], [[SH_PROM22]] |
2534 | // CHECK: store volatile <8 x i16> [[SHR23]], <8 x i16>* @us, align 8 |
2535 | // CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2536 | // CHECK: [[SHR24:%.*]] = lshr <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5> |
2537 | // CHECK: store volatile <8 x i16> [[SHR24]], <8 x i16>* @us, align 8 |
2538 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2539 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2540 | // CHECK: [[SHR25:%.*]] = ashr <4 x i32> [[TMP29]], [[TMP28]] |
2541 | // CHECK: store volatile <4 x i32> [[SHR25]], <4 x i32>* @si, align 8 |
2542 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2543 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2544 | // CHECK: [[SHR26:%.*]] = ashr <4 x i32> [[TMP31]], [[TMP30]] |
2545 | // CHECK: store volatile <4 x i32> [[SHR26]], <4 x i32>* @si, align 8 |
2546 | // CHECK: [[TMP32:%.*]] = load volatile i32, i32* @cnt, align 4 |
2547 | // CHECK: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP32]], i32 0 |
2548 | // CHECK: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer |
2549 | // CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2550 | // CHECK: [[SHR29:%.*]] = ashr <4 x i32> [[TMP33]], [[SPLAT_SPLAT28]] |
2551 | // CHECK: store volatile <4 x i32> [[SHR29]], <4 x i32>* @si, align 8 |
2552 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2553 | // CHECK: [[SHR30:%.*]] = ashr <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5> |
2554 | // CHECK: store volatile <4 x i32> [[SHR30]], <4 x i32>* @si, align 8 |
2555 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2556 | // CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2557 | // CHECK: [[SHR31:%.*]] = lshr <4 x i32> [[TMP36]], [[TMP35]] |
2558 | // CHECK: store volatile <4 x i32> [[SHR31]], <4 x i32>* @ui, align 8 |
2559 | // CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2560 | // CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2561 | // CHECK: [[SHR32:%.*]] = lshr <4 x i32> [[TMP38]], [[TMP37]] |
2562 | // CHECK: store volatile <4 x i32> [[SHR32]], <4 x i32>* @ui, align 8 |
2563 | // CHECK: [[TMP39:%.*]] = load volatile i32, i32* @cnt, align 4 |
2564 | // CHECK: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP39]], i32 0 |
2565 | // CHECK: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer |
2566 | // CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2567 | // CHECK: [[SHR35:%.*]] = lshr <4 x i32> [[TMP40]], [[SPLAT_SPLAT34]] |
2568 | // CHECK: store volatile <4 x i32> [[SHR35]], <4 x i32>* @ui, align 8 |
2569 | // CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2570 | // CHECK: [[SHR36:%.*]] = lshr <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5> |
2571 | // CHECK: store volatile <4 x i32> [[SHR36]], <4 x i32>* @ui, align 8 |
2572 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2573 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2574 | // CHECK: [[SHR37:%.*]] = ashr <2 x i64> [[TMP43]], [[TMP42]] |
2575 | // CHECK: store volatile <2 x i64> [[SHR37]], <2 x i64>* @sl, align 8 |
2576 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2577 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2578 | // CHECK: [[SHR38:%.*]] = ashr <2 x i64> [[TMP45]], [[TMP44]] |
2579 | // CHECK: store volatile <2 x i64> [[SHR38]], <2 x i64>* @sl, align 8 |
2580 | // CHECK: [[TMP46:%.*]] = load volatile i32, i32* @cnt, align 4 |
2581 | // CHECK: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP46]], i32 0 |
2582 | // CHECK: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer |
2583 | // CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2584 | // CHECK: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64> |
2585 | // CHECK: [[SHR42:%.*]] = ashr <2 x i64> [[TMP47]], [[SH_PROM41]] |
2586 | // CHECK: store volatile <2 x i64> [[SHR42]], <2 x i64>* @sl, align 8 |
2587 | // CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2588 | // CHECK: [[SHR43:%.*]] = ashr <2 x i64> [[TMP48]], <i64 5, i64 5> |
2589 | // CHECK: store volatile <2 x i64> [[SHR43]], <2 x i64>* @sl, align 8 |
2590 | // CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2591 | // CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2592 | // CHECK: [[SHR44:%.*]] = lshr <2 x i64> [[TMP50]], [[TMP49]] |
2593 | // CHECK: store volatile <2 x i64> [[SHR44]], <2 x i64>* @ul, align 8 |
2594 | // CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2595 | // CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2596 | // CHECK: [[SHR45:%.*]] = lshr <2 x i64> [[TMP52]], [[TMP51]] |
2597 | // CHECK: store volatile <2 x i64> [[SHR45]], <2 x i64>* @ul, align 8 |
2598 | // CHECK: [[TMP53:%.*]] = load volatile i32, i32* @cnt, align 4 |
2599 | // CHECK: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP53]], i32 0 |
2600 | // CHECK: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer |
2601 | // CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2602 | // CHECK: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64> |
2603 | // CHECK: [[SHR49:%.*]] = lshr <2 x i64> [[TMP54]], [[SH_PROM48]] |
2604 | // CHECK: store volatile <2 x i64> [[SHR49]], <2 x i64>* @ul, align 8 |
2605 | // CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2606 | // CHECK: [[SHR50:%.*]] = lshr <2 x i64> [[TMP55]], <i64 5, i64 5> |
2607 | // CHECK: store volatile <2 x i64> [[SHR50]], <2 x i64>* @ul, align 8 |
2608 | // CHECK: ret void |
2609 | void test_sr_assign(void) { |
2610 | |
2611 | sc >>= sc2; |
2612 | sc >>= uc2; |
2613 | sc >>= cnt; |
2614 | sc >>= 5; |
2615 | uc >>= sc2; |
2616 | uc >>= uc2; |
2617 | uc >>= cnt; |
2618 | uc >>= 5; |
2619 | |
2620 | ss >>= ss2; |
2621 | ss >>= us2; |
2622 | ss >>= cnt; |
2623 | ss >>= 5; |
2624 | us >>= ss2; |
2625 | us >>= us2; |
2626 | us >>= cnt; |
2627 | us >>= 5; |
2628 | |
2629 | si >>= si2; |
2630 | si >>= ui2; |
2631 | si >>= cnt; |
2632 | si >>= 5; |
2633 | ui >>= si2; |
2634 | ui >>= ui2; |
2635 | ui >>= cnt; |
2636 | ui >>= 5; |
2637 | |
2638 | sl >>= sl2; |
2639 | sl >>= ul2; |
2640 | sl >>= cnt; |
2641 | sl >>= 5; |
2642 | ul >>= sl2; |
2643 | ul >>= ul2; |
2644 | ul >>= cnt; |
2645 | ul >>= 5; |
2646 | } |
2647 | |
2648 | |
2649 | // CHECK-LABEL: define void @test_cmpeq() #0 { |
2650 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2651 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
2652 | // CHECK: [[CMP:%.*]] = icmp eq <16 x i8> [[TMP0]], [[TMP1]] |
2653 | // CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> |
2654 | // CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8 |
2655 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2656 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
2657 | // CHECK: [[CMP1:%.*]] = icmp eq <16 x i8> [[TMP2]], [[TMP3]] |
2658 | // CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8> |
2659 | // CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8 |
2660 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
2661 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
2662 | // CHECK: [[CMP3:%.*]] = icmp eq <16 x i8> [[TMP4]], [[TMP5]] |
2663 | // CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8> |
2664 | // CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8 |
2665 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2666 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
2667 | // CHECK: [[CMP5:%.*]] = icmp eq <16 x i8> [[TMP6]], [[TMP7]] |
2668 | // CHECK: [[SEXT6:%.*]] = sext <16 x i1> [[CMP5]] to <16 x i8> |
2669 | // CHECK: store volatile <16 x i8> [[SEXT6]], <16 x i8>* @bc, align 8 |
2670 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2671 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
2672 | // CHECK: [[CMP7:%.*]] = icmp eq <16 x i8> [[TMP8]], [[TMP9]] |
2673 | // CHECK: [[SEXT8:%.*]] = sext <16 x i1> [[CMP7]] to <16 x i8> |
2674 | // CHECK: store volatile <16 x i8> [[SEXT8]], <16 x i8>* @bc, align 8 |
2675 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
2676 | // CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
2677 | // CHECK: [[CMP9:%.*]] = icmp eq <16 x i8> [[TMP10]], [[TMP11]] |
2678 | // CHECK: [[SEXT10:%.*]] = sext <16 x i1> [[CMP9]] to <16 x i8> |
2679 | // CHECK: store volatile <16 x i8> [[SEXT10]], <16 x i8>* @bc, align 8 |
2680 | // CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
2681 | // CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
2682 | // CHECK: [[CMP11:%.*]] = icmp eq <16 x i8> [[TMP12]], [[TMP13]] |
2683 | // CHECK: [[SEXT12:%.*]] = sext <16 x i1> [[CMP11]] to <16 x i8> |
2684 | // CHECK: store volatile <16 x i8> [[SEXT12]], <16 x i8>* @bc, align 8 |
2685 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2686 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
2687 | // CHECK: [[CMP13:%.*]] = icmp eq <8 x i16> [[TMP14]], [[TMP15]] |
2688 | // CHECK: [[SEXT14:%.*]] = sext <8 x i1> [[CMP13]] to <8 x i16> |
2689 | // CHECK: store volatile <8 x i16> [[SEXT14]], <8 x i16>* @bs, align 8 |
2690 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2691 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
2692 | // CHECK: [[CMP15:%.*]] = icmp eq <8 x i16> [[TMP16]], [[TMP17]] |
2693 | // CHECK: [[SEXT16:%.*]] = sext <8 x i1> [[CMP15]] to <8 x i16> |
2694 | // CHECK: store volatile <8 x i16> [[SEXT16]], <8 x i16>* @bs, align 8 |
2695 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
2696 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
2697 | // CHECK: [[CMP17:%.*]] = icmp eq <8 x i16> [[TMP18]], [[TMP19]] |
2698 | // CHECK: [[SEXT18:%.*]] = sext <8 x i1> [[CMP17]] to <8 x i16> |
2699 | // CHECK: store volatile <8 x i16> [[SEXT18]], <8 x i16>* @bs, align 8 |
2700 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2701 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
2702 | // CHECK: [[CMP19:%.*]] = icmp eq <8 x i16> [[TMP20]], [[TMP21]] |
2703 | // CHECK: [[SEXT20:%.*]] = sext <8 x i1> [[CMP19]] to <8 x i16> |
2704 | // CHECK: store volatile <8 x i16> [[SEXT20]], <8 x i16>* @bs, align 8 |
2705 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2706 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
2707 | // CHECK: [[CMP21:%.*]] = icmp eq <8 x i16> [[TMP22]], [[TMP23]] |
2708 | // CHECK: [[SEXT22:%.*]] = sext <8 x i1> [[CMP21]] to <8 x i16> |
2709 | // CHECK: store volatile <8 x i16> [[SEXT22]], <8 x i16>* @bs, align 8 |
2710 | // CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
2711 | // CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
2712 | // CHECK: [[CMP23:%.*]] = icmp eq <8 x i16> [[TMP24]], [[TMP25]] |
2713 | // CHECK: [[SEXT24:%.*]] = sext <8 x i1> [[CMP23]] to <8 x i16> |
2714 | // CHECK: store volatile <8 x i16> [[SEXT24]], <8 x i16>* @bs, align 8 |
2715 | // CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
2716 | // CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
2717 | // CHECK: [[CMP25:%.*]] = icmp eq <8 x i16> [[TMP26]], [[TMP27]] |
2718 | // CHECK: [[SEXT26:%.*]] = sext <8 x i1> [[CMP25]] to <8 x i16> |
2719 | // CHECK: store volatile <8 x i16> [[SEXT26]], <8 x i16>* @bs, align 8 |
2720 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2721 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2722 | // CHECK: [[CMP27:%.*]] = icmp eq <4 x i32> [[TMP28]], [[TMP29]] |
2723 | // CHECK: [[SEXT28:%.*]] = sext <4 x i1> [[CMP27]] to <4 x i32> |
2724 | // CHECK: store volatile <4 x i32> [[SEXT28]], <4 x i32>* @bi, align 8 |
2725 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2726 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
2727 | // CHECK: [[CMP29:%.*]] = icmp eq <4 x i32> [[TMP30]], [[TMP31]] |
2728 | // CHECK: [[SEXT30:%.*]] = sext <4 x i1> [[CMP29]] to <4 x i32> |
2729 | // CHECK: store volatile <4 x i32> [[SEXT30]], <4 x i32>* @bi, align 8 |
2730 | // CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
2731 | // CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2732 | // CHECK: [[CMP31:%.*]] = icmp eq <4 x i32> [[TMP32]], [[TMP33]] |
2733 | // CHECK: [[SEXT32:%.*]] = sext <4 x i1> [[CMP31]] to <4 x i32> |
2734 | // CHECK: store volatile <4 x i32> [[SEXT32]], <4 x i32>* @bi, align 8 |
2735 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2736 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2737 | // CHECK: [[CMP33:%.*]] = icmp eq <4 x i32> [[TMP34]], [[TMP35]] |
2738 | // CHECK: [[SEXT34:%.*]] = sext <4 x i1> [[CMP33]] to <4 x i32> |
2739 | // CHECK: store volatile <4 x i32> [[SEXT34]], <4 x i32>* @bi, align 8 |
2740 | // CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2741 | // CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
2742 | // CHECK: [[CMP35:%.*]] = icmp eq <4 x i32> [[TMP36]], [[TMP37]] |
2743 | // CHECK: [[SEXT36:%.*]] = sext <4 x i1> [[CMP35]] to <4 x i32> |
2744 | // CHECK: store volatile <4 x i32> [[SEXT36]], <4 x i32>* @bi, align 8 |
2745 | // CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
2746 | // CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2747 | // CHECK: [[CMP37:%.*]] = icmp eq <4 x i32> [[TMP38]], [[TMP39]] |
2748 | // CHECK: [[SEXT38:%.*]] = sext <4 x i1> [[CMP37]] to <4 x i32> |
2749 | // CHECK: store volatile <4 x i32> [[SEXT38]], <4 x i32>* @bi, align 8 |
2750 | // CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
2751 | // CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
2752 | // CHECK: [[CMP39:%.*]] = icmp eq <4 x i32> [[TMP40]], [[TMP41]] |
2753 | // CHECK: [[SEXT40:%.*]] = sext <4 x i1> [[CMP39]] to <4 x i32> |
2754 | // CHECK: store volatile <4 x i32> [[SEXT40]], <4 x i32>* @bi, align 8 |
2755 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2756 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2757 | // CHECK: [[CMP41:%.*]] = icmp eq <2 x i64> [[TMP42]], [[TMP43]] |
2758 | // CHECK: [[SEXT42:%.*]] = sext <2 x i1> [[CMP41]] to <2 x i64> |
2759 | // CHECK: store volatile <2 x i64> [[SEXT42]], <2 x i64>* @bl, align 8 |
2760 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2761 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
2762 | // CHECK: [[CMP43:%.*]] = icmp eq <2 x i64> [[TMP44]], [[TMP45]] |
2763 | // CHECK: [[SEXT44:%.*]] = sext <2 x i1> [[CMP43]] to <2 x i64> |
2764 | // CHECK: store volatile <2 x i64> [[SEXT44]], <2 x i64>* @bl, align 8 |
2765 | // CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
2766 | // CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2767 | // CHECK: [[CMP45:%.*]] = icmp eq <2 x i64> [[TMP46]], [[TMP47]] |
2768 | // CHECK: [[SEXT46:%.*]] = sext <2 x i1> [[CMP45]] to <2 x i64> |
2769 | // CHECK: store volatile <2 x i64> [[SEXT46]], <2 x i64>* @bl, align 8 |
2770 | // CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2771 | // CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2772 | // CHECK: [[CMP47:%.*]] = icmp eq <2 x i64> [[TMP48]], [[TMP49]] |
2773 | // CHECK: [[SEXT48:%.*]] = sext <2 x i1> [[CMP47]] to <2 x i64> |
2774 | // CHECK: store volatile <2 x i64> [[SEXT48]], <2 x i64>* @bl, align 8 |
2775 | // CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2776 | // CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
2777 | // CHECK: [[CMP49:%.*]] = icmp eq <2 x i64> [[TMP50]], [[TMP51]] |
2778 | // CHECK: [[SEXT50:%.*]] = sext <2 x i1> [[CMP49]] to <2 x i64> |
2779 | // CHECK: store volatile <2 x i64> [[SEXT50]], <2 x i64>* @bl, align 8 |
2780 | // CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
2781 | // CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2782 | // CHECK: [[CMP51:%.*]] = icmp eq <2 x i64> [[TMP52]], [[TMP53]] |
2783 | // CHECK: [[SEXT52:%.*]] = sext <2 x i1> [[CMP51]] to <2 x i64> |
2784 | // CHECK: store volatile <2 x i64> [[SEXT52]], <2 x i64>* @bl, align 8 |
2785 | // CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
2786 | // CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
2787 | // CHECK: [[CMP53:%.*]] = icmp eq <2 x i64> [[TMP54]], [[TMP55]] |
2788 | // CHECK: [[SEXT54:%.*]] = sext <2 x i1> [[CMP53]] to <2 x i64> |
2789 | // CHECK: store volatile <2 x i64> [[SEXT54]], <2 x i64>* @bl, align 8 |
2790 | // CHECK: [[TMP56:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
2791 | // CHECK: [[TMP57:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
2792 | // CHECK: [[CMP55:%.*]] = fcmp oeq <2 x double> [[TMP56]], [[TMP57]] |
2793 | // CHECK: [[SEXT56:%.*]] = sext <2 x i1> [[CMP55]] to <2 x i64> |
2794 | // CHECK: store volatile <2 x i64> [[SEXT56]], <2 x i64>* @bl, align 8 |
2795 | // CHECK: ret void |
2796 | void test_cmpeq(void) { |
2797 | |
2798 | bc = sc == sc2; |
2799 | bc = sc == bc2; |
2800 | bc = bc == sc2; |
2801 | bc = uc == uc2; |
2802 | bc = uc == bc2; |
2803 | bc = bc == uc2; |
2804 | bc = bc == bc2; |
2805 | |
2806 | bs = ss == ss2; |
2807 | bs = ss == bs2; |
2808 | bs = bs == ss2; |
2809 | bs = us == us2; |
2810 | bs = us == bs2; |
2811 | bs = bs == us2; |
2812 | bs = bs == bs2; |
2813 | |
2814 | bi = si == si2; |
2815 | bi = si == bi2; |
2816 | bi = bi == si2; |
2817 | bi = ui == ui2; |
2818 | bi = ui == bi2; |
2819 | bi = bi == ui2; |
2820 | bi = bi == bi2; |
2821 | |
2822 | bl = sl == sl2; |
2823 | bl = sl == bl2; |
2824 | bl = bl == sl2; |
2825 | bl = ul == ul2; |
2826 | bl = ul == bl2; |
2827 | bl = bl == ul2; |
2828 | bl = bl == bl2; |
2829 | |
2830 | bl = fd == fd2; |
2831 | } |
2832 | |
2833 | // CHECK-LABEL: define void @test_cmpne() #0 { |
2834 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2835 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
2836 | // CHECK: [[CMP:%.*]] = icmp ne <16 x i8> [[TMP0]], [[TMP1]] |
2837 | // CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> |
2838 | // CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8 |
2839 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
2840 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
2841 | // CHECK: [[CMP1:%.*]] = icmp ne <16 x i8> [[TMP2]], [[TMP3]] |
2842 | // CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8> |
2843 | // CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8 |
2844 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
2845 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
2846 | // CHECK: [[CMP3:%.*]] = icmp ne <16 x i8> [[TMP4]], [[TMP5]] |
2847 | // CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8> |
2848 | // CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8 |
2849 | // CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2850 | // CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
2851 | // CHECK: [[CMP5:%.*]] = icmp ne <16 x i8> [[TMP6]], [[TMP7]] |
2852 | // CHECK: [[SEXT6:%.*]] = sext <16 x i1> [[CMP5]] to <16 x i8> |
2853 | // CHECK: store volatile <16 x i8> [[SEXT6]], <16 x i8>* @bc, align 8 |
2854 | // CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
2855 | // CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
2856 | // CHECK: [[CMP7:%.*]] = icmp ne <16 x i8> [[TMP8]], [[TMP9]] |
2857 | // CHECK: [[SEXT8:%.*]] = sext <16 x i1> [[CMP7]] to <16 x i8> |
2858 | // CHECK: store volatile <16 x i8> [[SEXT8]], <16 x i8>* @bc, align 8 |
2859 | // CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
2860 | // CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
2861 | // CHECK: [[CMP9:%.*]] = icmp ne <16 x i8> [[TMP10]], [[TMP11]] |
2862 | // CHECK: [[SEXT10:%.*]] = sext <16 x i1> [[CMP9]] to <16 x i8> |
2863 | // CHECK: store volatile <16 x i8> [[SEXT10]], <16 x i8>* @bc, align 8 |
2864 | // CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
2865 | // CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
2866 | // CHECK: [[CMP11:%.*]] = icmp ne <16 x i8> [[TMP12]], [[TMP13]] |
2867 | // CHECK: [[SEXT12:%.*]] = sext <16 x i1> [[CMP11]] to <16 x i8> |
2868 | // CHECK: store volatile <16 x i8> [[SEXT12]], <16 x i8>* @bc, align 8 |
2869 | // CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2870 | // CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
2871 | // CHECK: [[CMP13:%.*]] = icmp ne <8 x i16> [[TMP14]], [[TMP15]] |
2872 | // CHECK: [[SEXT14:%.*]] = sext <8 x i1> [[CMP13]] to <8 x i16> |
2873 | // CHECK: store volatile <8 x i16> [[SEXT14]], <8 x i16>* @bs, align 8 |
2874 | // CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
2875 | // CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
2876 | // CHECK: [[CMP15:%.*]] = icmp ne <8 x i16> [[TMP16]], [[TMP17]] |
2877 | // CHECK: [[SEXT16:%.*]] = sext <8 x i1> [[CMP15]] to <8 x i16> |
2878 | // CHECK: store volatile <8 x i16> [[SEXT16]], <8 x i16>* @bs, align 8 |
2879 | // CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
2880 | // CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
2881 | // CHECK: [[CMP17:%.*]] = icmp ne <8 x i16> [[TMP18]], [[TMP19]] |
2882 | // CHECK: [[SEXT18:%.*]] = sext <8 x i1> [[CMP17]] to <8 x i16> |
2883 | // CHECK: store volatile <8 x i16> [[SEXT18]], <8 x i16>* @bs, align 8 |
2884 | // CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2885 | // CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
2886 | // CHECK: [[CMP19:%.*]] = icmp ne <8 x i16> [[TMP20]], [[TMP21]] |
2887 | // CHECK: [[SEXT20:%.*]] = sext <8 x i1> [[CMP19]] to <8 x i16> |
2888 | // CHECK: store volatile <8 x i16> [[SEXT20]], <8 x i16>* @bs, align 8 |
2889 | // CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
2890 | // CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
2891 | // CHECK: [[CMP21:%.*]] = icmp ne <8 x i16> [[TMP22]], [[TMP23]] |
2892 | // CHECK: [[SEXT22:%.*]] = sext <8 x i1> [[CMP21]] to <8 x i16> |
2893 | // CHECK: store volatile <8 x i16> [[SEXT22]], <8 x i16>* @bs, align 8 |
2894 | // CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
2895 | // CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
2896 | // CHECK: [[CMP23:%.*]] = icmp ne <8 x i16> [[TMP24]], [[TMP25]] |
2897 | // CHECK: [[SEXT24:%.*]] = sext <8 x i1> [[CMP23]] to <8 x i16> |
2898 | // CHECK: store volatile <8 x i16> [[SEXT24]], <8 x i16>* @bs, align 8 |
2899 | // CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
2900 | // CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
2901 | // CHECK: [[CMP25:%.*]] = icmp ne <8 x i16> [[TMP26]], [[TMP27]] |
2902 | // CHECK: [[SEXT26:%.*]] = sext <8 x i1> [[CMP25]] to <8 x i16> |
2903 | // CHECK: store volatile <8 x i16> [[SEXT26]], <8 x i16>* @bs, align 8 |
2904 | // CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2905 | // CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2906 | // CHECK: [[CMP27:%.*]] = icmp ne <4 x i32> [[TMP28]], [[TMP29]] |
2907 | // CHECK: [[SEXT28:%.*]] = sext <4 x i1> [[CMP27]] to <4 x i32> |
2908 | // CHECK: store volatile <4 x i32> [[SEXT28]], <4 x i32>* @bi, align 8 |
2909 | // CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
2910 | // CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
2911 | // CHECK: [[CMP29:%.*]] = icmp ne <4 x i32> [[TMP30]], [[TMP31]] |
2912 | // CHECK: [[SEXT30:%.*]] = sext <4 x i1> [[CMP29]] to <4 x i32> |
2913 | // CHECK: store volatile <4 x i32> [[SEXT30]], <4 x i32>* @bi, align 8 |
2914 | // CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
2915 | // CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
2916 | // CHECK: [[CMP31:%.*]] = icmp ne <4 x i32> [[TMP32]], [[TMP33]] |
2917 | // CHECK: [[SEXT32:%.*]] = sext <4 x i1> [[CMP31]] to <4 x i32> |
2918 | // CHECK: store volatile <4 x i32> [[SEXT32]], <4 x i32>* @bi, align 8 |
2919 | // CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2920 | // CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2921 | // CHECK: [[CMP33:%.*]] = icmp ne <4 x i32> [[TMP34]], [[TMP35]] |
2922 | // CHECK: [[SEXT34:%.*]] = sext <4 x i1> [[CMP33]] to <4 x i32> |
2923 | // CHECK: store volatile <4 x i32> [[SEXT34]], <4 x i32>* @bi, align 8 |
2924 | // CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
2925 | // CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
2926 | // CHECK: [[CMP35:%.*]] = icmp ne <4 x i32> [[TMP36]], [[TMP37]] |
2927 | // CHECK: [[SEXT36:%.*]] = sext <4 x i1> [[CMP35]] to <4 x i32> |
2928 | // CHECK: store volatile <4 x i32> [[SEXT36]], <4 x i32>* @bi, align 8 |
2929 | // CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
2930 | // CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
2931 | // CHECK: [[CMP37:%.*]] = icmp ne <4 x i32> [[TMP38]], [[TMP39]] |
2932 | // CHECK: [[SEXT38:%.*]] = sext <4 x i1> [[CMP37]] to <4 x i32> |
2933 | // CHECK: store volatile <4 x i32> [[SEXT38]], <4 x i32>* @bi, align 8 |
2934 | // CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
2935 | // CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
2936 | // CHECK: [[CMP39:%.*]] = icmp ne <4 x i32> [[TMP40]], [[TMP41]] |
2937 | // CHECK: [[SEXT40:%.*]] = sext <4 x i1> [[CMP39]] to <4 x i32> |
2938 | // CHECK: store volatile <4 x i32> [[SEXT40]], <4 x i32>* @bi, align 8 |
2939 | // CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2940 | // CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2941 | // CHECK: [[CMP41:%.*]] = icmp ne <2 x i64> [[TMP42]], [[TMP43]] |
2942 | // CHECK: [[SEXT42:%.*]] = sext <2 x i1> [[CMP41]] to <2 x i64> |
2943 | // CHECK: store volatile <2 x i64> [[SEXT42]], <2 x i64>* @bl, align 8 |
2944 | // CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
2945 | // CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
2946 | // CHECK: [[CMP43:%.*]] = icmp ne <2 x i64> [[TMP44]], [[TMP45]] |
2947 | // CHECK: [[SEXT44:%.*]] = sext <2 x i1> [[CMP43]] to <2 x i64> |
2948 | // CHECK: store volatile <2 x i64> [[SEXT44]], <2 x i64>* @bl, align 8 |
2949 | // CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
2950 | // CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
2951 | // CHECK: [[CMP45:%.*]] = icmp ne <2 x i64> [[TMP46]], [[TMP47]] |
2952 | // CHECK: [[SEXT46:%.*]] = sext <2 x i1> [[CMP45]] to <2 x i64> |
2953 | // CHECK: store volatile <2 x i64> [[SEXT46]], <2 x i64>* @bl, align 8 |
2954 | // CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2955 | // CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2956 | // CHECK: [[CMP47:%.*]] = icmp ne <2 x i64> [[TMP48]], [[TMP49]] |
2957 | // CHECK: [[SEXT48:%.*]] = sext <2 x i1> [[CMP47]] to <2 x i64> |
2958 | // CHECK: store volatile <2 x i64> [[SEXT48]], <2 x i64>* @bl, align 8 |
2959 | // CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
2960 | // CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
2961 | // CHECK: [[CMP49:%.*]] = icmp ne <2 x i64> [[TMP50]], [[TMP51]] |
2962 | // CHECK: [[SEXT50:%.*]] = sext <2 x i1> [[CMP49]] to <2 x i64> |
2963 | // CHECK: store volatile <2 x i64> [[SEXT50]], <2 x i64>* @bl, align 8 |
2964 | // CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
2965 | // CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
2966 | // CHECK: [[CMP51:%.*]] = icmp ne <2 x i64> [[TMP52]], [[TMP53]] |
2967 | // CHECK: [[SEXT52:%.*]] = sext <2 x i1> [[CMP51]] to <2 x i64> |
2968 | // CHECK: store volatile <2 x i64> [[SEXT52]], <2 x i64>* @bl, align 8 |
2969 | // CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
2970 | // CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
2971 | // CHECK: [[CMP53:%.*]] = icmp ne <2 x i64> [[TMP54]], [[TMP55]] |
2972 | // CHECK: [[SEXT54:%.*]] = sext <2 x i1> [[CMP53]] to <2 x i64> |
2973 | // CHECK: store volatile <2 x i64> [[SEXT54]], <2 x i64>* @bl, align 8 |
2974 | // CHECK: [[TMP56:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
2975 | // CHECK: [[TMP57:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
2976 | // CHECK: [[CMP55:%.*]] = fcmp une <2 x double> [[TMP56]], [[TMP57]] |
2977 | // CHECK: [[SEXT56:%.*]] = sext <2 x i1> [[CMP55]] to <2 x i64> |
2978 | // CHECK: store volatile <2 x i64> [[SEXT56]], <2 x i64>* @bl, align 8 |
2979 | // CHECK: ret void |
2980 | void test_cmpne(void) { |
2981 | |
2982 | bc = sc != sc2; |
2983 | bc = sc != bc2; |
2984 | bc = bc != sc2; |
2985 | bc = uc != uc2; |
2986 | bc = uc != bc2; |
2987 | bc = bc != uc2; |
2988 | bc = bc != bc2; |
2989 | |
2990 | bs = ss != ss2; |
2991 | bs = ss != bs2; |
2992 | bs = bs != ss2; |
2993 | bs = us != us2; |
2994 | bs = us != bs2; |
2995 | bs = bs != us2; |
2996 | bs = bs != bs2; |
2997 | |
2998 | bi = si != si2; |
2999 | bi = si != bi2; |
3000 | bi = bi != si2; |
3001 | bi = ui != ui2; |
3002 | bi = ui != bi2; |
3003 | bi = bi != ui2; |
3004 | bi = bi != bi2; |
3005 | |
3006 | bl = sl != sl2; |
3007 | bl = sl != bl2; |
3008 | bl = bl != sl2; |
3009 | bl = ul != ul2; |
3010 | bl = ul != bl2; |
3011 | bl = bl != ul2; |
3012 | bl = bl != bl2; |
3013 | |
3014 | bl = fd != fd2; |
3015 | } |
3016 | |
3017 | // CHECK-LABEL: define void @test_cmpge() #0 { |
3018 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
3019 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
3020 | // CHECK: [[CMP:%.*]] = icmp sge <16 x i8> [[TMP0]], [[TMP1]] |
3021 | // CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> |
3022 | // CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8 |
3023 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
3024 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
3025 | // CHECK: [[CMP1:%.*]] = icmp uge <16 x i8> [[TMP2]], [[TMP3]] |
3026 | // CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8> |
3027 | // CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8 |
3028 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
3029 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
3030 | // CHECK: [[CMP3:%.*]] = icmp uge <16 x i8> [[TMP4]], [[TMP5]] |
3031 | // CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8> |
3032 | // CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8 |
3033 | // CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
3034 | // CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
3035 | // CHECK: [[CMP5:%.*]] = icmp sge <8 x i16> [[TMP6]], [[TMP7]] |
3036 | // CHECK: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16> |
3037 | // CHECK: store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8 |
3038 | // CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
3039 | // CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
3040 | // CHECK: [[CMP7:%.*]] = icmp uge <8 x i16> [[TMP8]], [[TMP9]] |
3041 | // CHECK: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16> |
3042 | // CHECK: store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8 |
3043 | // CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
3044 | // CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
3045 | // CHECK: [[CMP9:%.*]] = icmp uge <8 x i16> [[TMP10]], [[TMP11]] |
3046 | // CHECK: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16> |
3047 | // CHECK: store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8 |
3048 | // CHECK: [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
3049 | // CHECK: [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
3050 | // CHECK: [[CMP11:%.*]] = icmp sge <4 x i32> [[TMP12]], [[TMP13]] |
3051 | // CHECK: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32> |
3052 | // CHECK: store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8 |
3053 | // CHECK: [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
3054 | // CHECK: [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
3055 | // CHECK: [[CMP13:%.*]] = icmp uge <4 x i32> [[TMP14]], [[TMP15]] |
3056 | // CHECK: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32> |
3057 | // CHECK: store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8 |
3058 | // CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
3059 | // CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
3060 | // CHECK: [[CMP15:%.*]] = icmp uge <4 x i32> [[TMP16]], [[TMP17]] |
3061 | // CHECK: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32> |
3062 | // CHECK: store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8 |
3063 | // CHECK: [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
3064 | // CHECK: [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
3065 | // CHECK: [[CMP17:%.*]] = icmp sge <2 x i64> [[TMP18]], [[TMP19]] |
3066 | // CHECK: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64> |
3067 | // CHECK: store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8 |
3068 | // CHECK: [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
3069 | // CHECK: [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
3070 | // CHECK: [[CMP19:%.*]] = icmp uge <2 x i64> [[TMP20]], [[TMP21]] |
3071 | // CHECK: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64> |
3072 | // CHECK: store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8 |
3073 | // CHECK: [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
3074 | // CHECK: [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
3075 | // CHECK: [[CMP21:%.*]] = icmp uge <2 x i64> [[TMP22]], [[TMP23]] |
3076 | // CHECK: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64> |
3077 | // CHECK: store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8 |
3078 | // CHECK: [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
3079 | // CHECK: [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
3080 | // CHECK: [[CMP23:%.*]] = fcmp oge <2 x double> [[TMP24]], [[TMP25]] |
3081 | // CHECK: [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64> |
3082 | // CHECK: store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8 |
3083 | // CHECK: ret void |
3084 | void test_cmpge(void) { |
3085 | |
3086 | bc = sc >= sc2; |
3087 | bc = uc >= uc2; |
3088 | bc = bc >= bc2; |
3089 | |
3090 | bs = ss >= ss2; |
3091 | bs = us >= us2; |
3092 | bs = bs >= bs2; |
3093 | |
3094 | bi = si >= si2; |
3095 | bi = ui >= ui2; |
3096 | bi = bi >= bi2; |
3097 | |
3098 | bl = sl >= sl2; |
3099 | bl = ul >= ul2; |
3100 | bl = bl >= bl2; |
3101 | |
3102 | bl = fd >= fd2; |
3103 | } |
3104 | |
3105 | // CHECK-LABEL: define void @test_cmpgt() #0 { |
3106 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
3107 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
3108 | // CHECK: [[CMP:%.*]] = icmp sgt <16 x i8> [[TMP0]], [[TMP1]] |
3109 | // CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> |
3110 | // CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8 |
3111 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
3112 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
3113 | // CHECK: [[CMP1:%.*]] = icmp ugt <16 x i8> [[TMP2]], [[TMP3]] |
3114 | // CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8> |
3115 | // CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8 |
3116 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
3117 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
3118 | // CHECK: [[CMP3:%.*]] = icmp ugt <16 x i8> [[TMP4]], [[TMP5]] |
3119 | // CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8> |
3120 | // CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8 |
3121 | // CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
3122 | // CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
3123 | // CHECK: [[CMP5:%.*]] = icmp sgt <8 x i16> [[TMP6]], [[TMP7]] |
3124 | // CHECK: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16> |
3125 | // CHECK: store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8 |
3126 | // CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
3127 | // CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
3128 | // CHECK: [[CMP7:%.*]] = icmp ugt <8 x i16> [[TMP8]], [[TMP9]] |
3129 | // CHECK: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16> |
3130 | // CHECK: store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8 |
3131 | // CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
3132 | // CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
3133 | // CHECK: [[CMP9:%.*]] = icmp ugt <8 x i16> [[TMP10]], [[TMP11]] |
3134 | // CHECK: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16> |
3135 | // CHECK: store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8 |
3136 | // CHECK: [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
3137 | // CHECK: [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
3138 | // CHECK: [[CMP11:%.*]] = icmp sgt <4 x i32> [[TMP12]], [[TMP13]] |
3139 | // CHECK: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32> |
3140 | // CHECK: store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8 |
3141 | // CHECK: [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
3142 | // CHECK: [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
3143 | // CHECK: [[CMP13:%.*]] = icmp ugt <4 x i32> [[TMP14]], [[TMP15]] |
3144 | // CHECK: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32> |
3145 | // CHECK: store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8 |
3146 | // CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
3147 | // CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
3148 | // CHECK: [[CMP15:%.*]] = icmp ugt <4 x i32> [[TMP16]], [[TMP17]] |
3149 | // CHECK: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32> |
3150 | // CHECK: store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8 |
3151 | // CHECK: [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
3152 | // CHECK: [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
3153 | // CHECK: [[CMP17:%.*]] = icmp sgt <2 x i64> [[TMP18]], [[TMP19]] |
3154 | // CHECK: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64> |
3155 | // CHECK: store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8 |
3156 | // CHECK: [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
3157 | // CHECK: [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
3158 | // CHECK: [[CMP19:%.*]] = icmp ugt <2 x i64> [[TMP20]], [[TMP21]] |
3159 | // CHECK: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64> |
3160 | // CHECK: store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8 |
3161 | // CHECK: [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
3162 | // CHECK: [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
3163 | // CHECK: [[CMP21:%.*]] = icmp ugt <2 x i64> [[TMP22]], [[TMP23]] |
3164 | // CHECK: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64> |
3165 | // CHECK: store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8 |
3166 | // CHECK: [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
3167 | // CHECK: [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
3168 | // CHECK: [[CMP23:%.*]] = fcmp ogt <2 x double> [[TMP24]], [[TMP25]] |
3169 | // CHECK: [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64> |
3170 | // CHECK: store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8 |
3171 | // CHECK: ret void |
3172 | void test_cmpgt(void) { |
3173 | |
3174 | bc = sc > sc2; |
3175 | bc = uc > uc2; |
3176 | bc = bc > bc2; |
3177 | |
3178 | bs = ss > ss2; |
3179 | bs = us > us2; |
3180 | bs = bs > bs2; |
3181 | |
3182 | bi = si > si2; |
3183 | bi = ui > ui2; |
3184 | bi = bi > bi2; |
3185 | |
3186 | bl = sl > sl2; |
3187 | bl = ul > ul2; |
3188 | bl = bl > bl2; |
3189 | |
3190 | bl = fd > fd2; |
3191 | } |
3192 | |
3193 | // CHECK-LABEL: define void @test_cmple() #0 { |
3194 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
3195 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
3196 | // CHECK: [[CMP:%.*]] = icmp sle <16 x i8> [[TMP0]], [[TMP1]] |
3197 | // CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> |
3198 | // CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8 |
3199 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
3200 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
3201 | // CHECK: [[CMP1:%.*]] = icmp ule <16 x i8> [[TMP2]], [[TMP3]] |
3202 | // CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8> |
3203 | // CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8 |
3204 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
3205 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
3206 | // CHECK: [[CMP3:%.*]] = icmp ule <16 x i8> [[TMP4]], [[TMP5]] |
3207 | // CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8> |
3208 | // CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8 |
3209 | // CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
3210 | // CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
3211 | // CHECK: [[CMP5:%.*]] = icmp sle <8 x i16> [[TMP6]], [[TMP7]] |
3212 | // CHECK: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16> |
3213 | // CHECK: store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8 |
3214 | // CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
3215 | // CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
3216 | // CHECK: [[CMP7:%.*]] = icmp ule <8 x i16> [[TMP8]], [[TMP9]] |
3217 | // CHECK: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16> |
3218 | // CHECK: store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8 |
3219 | // CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
3220 | // CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
3221 | // CHECK: [[CMP9:%.*]] = icmp ule <8 x i16> [[TMP10]], [[TMP11]] |
3222 | // CHECK: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16> |
3223 | // CHECK: store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8 |
3224 | // CHECK: [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
3225 | // CHECK: [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
3226 | // CHECK: [[CMP11:%.*]] = icmp sle <4 x i32> [[TMP12]], [[TMP13]] |
3227 | // CHECK: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32> |
3228 | // CHECK: store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8 |
3229 | // CHECK: [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
3230 | // CHECK: [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
3231 | // CHECK: [[CMP13:%.*]] = icmp ule <4 x i32> [[TMP14]], [[TMP15]] |
3232 | // CHECK: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32> |
3233 | // CHECK: store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8 |
3234 | // CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
3235 | // CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
3236 | // CHECK: [[CMP15:%.*]] = icmp ule <4 x i32> [[TMP16]], [[TMP17]] |
3237 | // CHECK: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32> |
3238 | // CHECK: store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8 |
3239 | // CHECK: [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
3240 | // CHECK: [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
3241 | // CHECK: [[CMP17:%.*]] = icmp sle <2 x i64> [[TMP18]], [[TMP19]] |
3242 | // CHECK: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64> |
3243 | // CHECK: store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8 |
3244 | // CHECK: [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
3245 | // CHECK: [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
3246 | // CHECK: [[CMP19:%.*]] = icmp ule <2 x i64> [[TMP20]], [[TMP21]] |
3247 | // CHECK: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64> |
3248 | // CHECK: store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8 |
3249 | // CHECK: [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
3250 | // CHECK: [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
3251 | // CHECK: [[CMP21:%.*]] = icmp ule <2 x i64> [[TMP22]], [[TMP23]] |
3252 | // CHECK: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64> |
3253 | // CHECK: store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8 |
3254 | // CHECK: [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
3255 | // CHECK: [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
3256 | // CHECK: [[CMP23:%.*]] = fcmp ole <2 x double> [[TMP24]], [[TMP25]] |
3257 | // CHECK: [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64> |
3258 | // CHECK: store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8 |
3259 | // CHECK: ret void |
3260 | void test_cmple(void) { |
3261 | |
3262 | bc = sc <= sc2; |
3263 | bc = uc <= uc2; |
3264 | bc = bc <= bc2; |
3265 | |
3266 | bs = ss <= ss2; |
3267 | bs = us <= us2; |
3268 | bs = bs <= bs2; |
3269 | |
3270 | bi = si <= si2; |
3271 | bi = ui <= ui2; |
3272 | bi = bi <= bi2; |
3273 | |
3274 | bl = sl <= sl2; |
3275 | bl = ul <= ul2; |
3276 | bl = bl <= bl2; |
3277 | |
3278 | bl = fd <= fd2; |
3279 | } |
3280 | |
3281 | // CHECK-LABEL: define void @test_cmplt() #0 { |
3282 | // CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8 |
3283 | // CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8 |
3284 | // CHECK: [[CMP:%.*]] = icmp slt <16 x i8> [[TMP0]], [[TMP1]] |
3285 | // CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> |
3286 | // CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8 |
3287 | // CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8 |
3288 | // CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8 |
3289 | // CHECK: [[CMP1:%.*]] = icmp ult <16 x i8> [[TMP2]], [[TMP3]] |
3290 | // CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8> |
3291 | // CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8 |
3292 | // CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8 |
3293 | // CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8 |
3294 | // CHECK: [[CMP3:%.*]] = icmp ult <16 x i8> [[TMP4]], [[TMP5]] |
3295 | // CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8> |
3296 | // CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8 |
3297 | // CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8 |
3298 | // CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8 |
3299 | // CHECK: [[CMP5:%.*]] = icmp slt <8 x i16> [[TMP6]], [[TMP7]] |
3300 | // CHECK: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16> |
3301 | // CHECK: store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8 |
3302 | // CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8 |
3303 | // CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8 |
3304 | // CHECK: [[CMP7:%.*]] = icmp ult <8 x i16> [[TMP8]], [[TMP9]] |
3305 | // CHECK: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16> |
3306 | // CHECK: store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8 |
3307 | // CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8 |
3308 | // CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8 |
3309 | // CHECK: [[CMP9:%.*]] = icmp ult <8 x i16> [[TMP10]], [[TMP11]] |
3310 | // CHECK: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16> |
3311 | // CHECK: store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8 |
3312 | // CHECK: [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8 |
3313 | // CHECK: [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8 |
3314 | // CHECK: [[CMP11:%.*]] = icmp slt <4 x i32> [[TMP12]], [[TMP13]] |
3315 | // CHECK: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32> |
3316 | // CHECK: store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8 |
3317 | // CHECK: [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8 |
3318 | // CHECK: [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8 |
3319 | // CHECK: [[CMP13:%.*]] = icmp ult <4 x i32> [[TMP14]], [[TMP15]] |
3320 | // CHECK: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32> |
3321 | // CHECK: store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8 |
3322 | // CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8 |
3323 | // CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8 |
3324 | // CHECK: [[CMP15:%.*]] = icmp ult <4 x i32> [[TMP16]], [[TMP17]] |
3325 | // CHECK: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32> |
3326 | // CHECK: store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8 |
3327 | // CHECK: [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8 |
3328 | // CHECK: [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8 |
3329 | // CHECK: [[CMP17:%.*]] = icmp slt <2 x i64> [[TMP18]], [[TMP19]] |
3330 | // CHECK: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64> |
3331 | // CHECK: store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8 |
3332 | // CHECK: [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8 |
3333 | // CHECK: [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8 |
3334 | // CHECK: [[CMP19:%.*]] = icmp ult <2 x i64> [[TMP20]], [[TMP21]] |
3335 | // CHECK: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64> |
3336 | // CHECK: store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8 |
3337 | // CHECK: [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8 |
3338 | // CHECK: [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8 |
3339 | // CHECK: [[CMP21:%.*]] = icmp ult <2 x i64> [[TMP22]], [[TMP23]] |
3340 | // CHECK: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64> |
3341 | // CHECK: store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8 |
3342 | // CHECK: [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8 |
3343 | // CHECK: [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8 |
3344 | // CHECK: [[CMP23:%.*]] = fcmp olt <2 x double> [[TMP24]], [[TMP25]] |
3345 | // CHECK: [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64> |
3346 | // CHECK: store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8 |
3347 | // CHECK: ret void |
3348 | void test_cmplt(void) { |
3349 | |
3350 | bc = sc < sc2; |
3351 | bc = uc < uc2; |
3352 | bc = bc < bc2; |
3353 | |
3354 | bs = ss < ss2; |
3355 | bs = us < us2; |
3356 | bs = bs < bs2; |
3357 | |
3358 | bi = si < si2; |
3359 | bi = ui < ui2; |
3360 | bi = bi < bi2; |
3361 | |
3362 | bl = sl < sl2; |
3363 | bl = ul < ul2; |
3364 | bl = bl < bl2; |
3365 | |
3366 | bl = fd < fd2; |
3367 | } |
3368 | |
3369 | |