1 | // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \ |
2 | // RUN: -triple i686--windows -Oz -emit-llvm %s -o - \ |
// RUN: | FileCheck %s --check-prefixes CHECK,CHECK-I386,CHECK-INTEL
4 | // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \ |
5 | // RUN: -triple thumbv7--windows -Oz -emit-llvm %s -o - \ |
6 | // RUN: | FileCheck %s --check-prefixes CHECK,CHECK-ARM,CHECK-ARM-ARM64,CHECK-ARM-X64 |
7 | // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \ |
8 | // RUN: -triple x86_64--windows -Oz -emit-llvm -target-feature +cx16 %s -o - \ |
9 | // RUN: | FileCheck %s --check-prefixes CHECK,CHECK-X64,CHECK-ARM-X64,CHECK-INTEL |
10 | // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \ |
11 | // RUN: -triple aarch64-windows -Oz -emit-llvm %s -o - \ |
12 | // RUN: | FileCheck %s --check-prefixes CHECK-ARM-ARM64,CHECK-ARM-X64,CHECK-ARM64 |
13 | |
14 | // intrin.h needs size_t, but -ffreestanding prevents us from getting it from |
15 | // stddef.h. Work around it with this typedef. |
16 | typedef __SIZE_TYPE__ size_t; |
17 | |
18 | #include <intrin.h> |
19 | |
20 | #if defined(__i386__) || defined(__x86_64__) |
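// __stosb is byte-granular, so it lowers to a volatile memset intrinsic (the
// trailing "i1 true" is the is-volatile flag); the wider __stos* variants and
// all of the __movs* variants instead lower to "rep stos"/"rep movs" inline
// asm whose result tuple models the updated di/si/cx registers.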
21 | void test__stosb(unsigned char *Dest, unsigned char Data, size_t Count) { |
22 | return __stosb(Dest, Data, Count); |
23 | } |
24 | |
25 | // CHECK-I386: define{{.*}}void @test__stosb |
26 | // CHECK-I386: tail call void @llvm.memset.p0i8.i32(i8* align 1 %Dest, i8 %Data, i32 %Count, i1 true) |
27 | // CHECK-I386: ret void |
28 | // CHECK-I386: } |
29 | |
30 | // CHECK-X64: define{{.*}}void @test__stosb |
31 | // CHECK-X64: tail call void @llvm.memset.p0i8.i64(i8* align 1 %Dest, i8 %Data, i64 %Count, i1 true) |
32 | // CHECK-X64: ret void |
33 | // CHECK-X64: } |
34 | |
35 | void test__movsb(unsigned char *Dest, unsigned char *Src, size_t Count) { |
36 | return __movsb(Dest, Src, Count); |
37 | } |
38 | // CHECK-I386-LABEL: define{{.*}} void @test__movsb |
39 | // CHECK-I386: call { i8*, i8*, i32 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i32 %Count) |
40 | // CHECK-I386: ret void |
41 | // CHECK-I386: } |
42 | |
43 | // CHECK-X64-LABEL: define{{.*}} void @test__movsb |
44 | // CHECK-X64: call { i8*, i8*, i64 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i64 %Count) |
45 | // CHECK-X64: ret void |
46 | // CHECK-X64: } |
47 | |
48 | void test__stosw(unsigned short *Dest, unsigned short Data, size_t Count) { |
49 | return __stosw(Dest, Data, Count); |
50 | } |
51 | // CHECK-I386-LABEL: define{{.*}} void @test__stosw |
52 | // CHECK-I386: call { i16*, i32 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i32 %Count) |
53 | // CHECK-I386: ret void |
54 | // CHECK-I386: } |
55 | |
56 | // CHECK-X64-LABEL: define{{.*}} void @test__stosw |
57 | // CHECK-X64: call { i16*, i64 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i64 %Count) |
58 | // CHECK-X64: ret void |
59 | // CHECK-X64: } |
60 | |
61 | void test__movsw(unsigned short *Dest, unsigned short *Src, size_t Count) { |
62 | return __movsw(Dest, Src, Count); |
63 | } |
64 | // CHECK-I386-LABEL: define{{.*}} void @test__movsw |
65 | // CHECK-I386: call { i16*, i16*, i32 } asm sideeffect "rep movsw", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i32 %Count) |
66 | // CHECK-I386: ret void |
67 | // CHECK-I386: } |
68 | |
69 | // CHECK-X64-LABEL: define{{.*}} void @test__movsw |
70 | // CHECK-X64: call { i16*, i16*, i64 } asm sideeffect "rep movsw", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i64 %Count) |
71 | // CHECK-X64: ret void |
72 | // CHECK-X64: } |
73 | |
74 | void test__stosd(unsigned long *Dest, unsigned long Data, size_t Count) { |
75 | return __stosd(Dest, Data, Count); |
76 | } |
77 | // CHECK-I386-LABEL: define{{.*}} void @test__stosd |
78 | // CHECK-I386: call { i32*, i32 } asm sideeffect "rep stosl", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i32 %Count) |
79 | // CHECK-I386: ret void |
80 | // CHECK-I386: } |
81 | |
82 | // CHECK-X64-LABEL: define{{.*}} void @test__stosd |
83 | // CHECK-X64: call { i32*, i64 } asm sideeffect "rep stosl", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i64 %Count) |
84 | // CHECK-X64: ret void |
85 | // CHECK-X64: } |
86 | |
87 | void test__movsd(unsigned long *Dest, unsigned long *Src, size_t Count) { |
88 | return __movsd(Dest, Src, Count); |
89 | } |
90 | // CHECK-I386-LABEL: define{{.*}} void @test__movsd |
91 | // CHECK-I386: call { i32*, i32*, i32 } asm sideeffect "rep movsl", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i32 %Count) |
92 | // CHECK-I386: ret void |
93 | // CHECK-I386: } |
94 | |
95 | // CHECK-X64-LABEL: define{{.*}} void @test__movsd |
96 | // CHECK-X64: call { i32*, i32*, i64 } asm sideeffect "rep movsl", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i64 %Count) |
97 | // CHECK-X64: ret void |
98 | // CHECK-X64: } |
99 | |
100 | #ifdef __x86_64__ |
101 | void test__stosq(unsigned __int64 *Dest, unsigned __int64 Data, size_t Count) { |
102 | return __stosq(Dest, Data, Count); |
103 | } |
104 | // CHECK-X64-LABEL: define{{.*}} void @test__stosq |
105 | // CHECK-X64: call { i64*, i64 } asm sideeffect "rep stosq", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64 %Data, i64* %Dest, i64 %Count) |
106 | // CHECK-X64: ret void |
107 | // CHECK-X64: } |
108 | |
109 | void test__movsq(unsigned __int64 *Dest, unsigned __int64 *Src, size_t Count) { |
110 | return __movsq(Dest, Src, Count); |
111 | } |
112 | // CHECK-X64-LABEL: define{{.*}} void @test__movsq |
113 | // CHECK-X64: call { i64*, i64*, i64 } asm sideeffect "rep movsq", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Dest, i64* %Src, i64 %Count) |
114 | // CHECK-X64: ret void |
115 | // CHECK-X64: } |
116 | #endif |
117 | |
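// __ud2 lowers to llvm.trap, which the x86 backends emit as a ud2
// (invalid-opcode) instruction; __int2c raises interrupt 0x2c, which Windows
// uses to report a failed assertion, and the call is expected to carry a
// noreturn attribute (captured below as NORETURN).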
118 | void test__ud2(void) { |
119 | __ud2(); |
120 | } |
121 | // CHECK-INTEL-LABEL: define{{.*}} void @test__ud2() |
122 | // CHECK-INTEL: call void @llvm.trap() |
123 | |
124 | void test__int2c(void) { |
125 | __int2c(); |
126 | } |
127 | // CHECK-INTEL-LABEL: define{{.*}} void @test__int2c() |
128 | // CHECK-INTEL: call void asm sideeffect "int $$0x2c", ""() #[[NORETURN:[0-9]+]] |
129 | |
130 | |
131 | #endif |
132 | |
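// _ReturnAddress and _AddressOfReturnAddress map directly onto the LLVM
// returnaddress/addressofreturnaddress intrinsics; the argument 0 selects the
// current frame.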
133 | void *test_ReturnAddress() { |
134 | return _ReturnAddress(); |
135 | } |
136 | // CHECK-LABEL: define{{.*}}i8* @test_ReturnAddress() |
137 | // CHECK: = tail call i8* @llvm.returnaddress(i32 0) |
138 | // CHECK: ret i8* |
139 | |
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
141 | void *test_AddressOfReturnAddress() { |
142 | return _AddressOfReturnAddress(); |
143 | } |
144 | // CHECK-INTEL-LABEL: define dso_local i8* @test_AddressOfReturnAddress() |
145 | // CHECK-INTEL: = tail call i8* @llvm.addressofreturnaddress() |
146 | // CHECK-INTEL: ret i8* |
147 | #endif |
148 | |
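// _BitScanForward/_BitScanReverse return 0 without storing to *Index when
// Mask is zero, otherwise they store the index of the lowest/highest set bit
// and return 1. The explicit zero test is what lets the cttz/ctlz intrinsics
// be emitted with the "is zero undef" flag set to true, and the reverse scan
// computes the bit index 31 - ctlz as "ctlz xor 31".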
149 | unsigned char test_BitScanForward(unsigned long *Index, unsigned long Mask) { |
150 | return _BitScanForward(Index, Mask); |
151 | } |
152 | // CHECK: define{{.*}}i8 @test_BitScanForward(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{ |
// CHECK: [[ISZERO:%[a-z0-9._]+]] = icmp eq i32 %Mask, 0
// CHECK: br i1 [[ISZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
155 | // CHECK: [[END_LABEL]]: |
156 | // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ] |
157 | // CHECK: ret i8 [[RESULT]] |
158 | // CHECK: [[ISNOTZERO_LABEL]]: |
159 | // CHECK: [[INDEX:%[0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %Mask, i1 true) |
160 | // CHECK: store i32 [[INDEX]], i32* %Index, align 4 |
161 | // CHECK: br label %[[END_LABEL]] |
162 | |
163 | unsigned char test_BitScanReverse(unsigned long *Index, unsigned long Mask) { |
164 | return _BitScanReverse(Index, Mask); |
165 | } |
166 | // CHECK: define{{.*}}i8 @test_BitScanReverse(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{ |
// CHECK: [[ISZERO:%[0-9]+]] = icmp eq i32 %Mask, 0
// CHECK: br i1 [[ISZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
169 | // CHECK: [[END_LABEL]]: |
170 | // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ] |
171 | // CHECK: ret i8 [[RESULT]] |
172 | // CHECK: [[ISNOTZERO_LABEL]]: |
173 | // CHECK: [[REVINDEX:%[0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %Mask, i1 true) |
174 | // CHECK: [[INDEX:%[0-9]+]] = xor i32 [[REVINDEX]], 31 |
175 | // CHECK: store i32 [[INDEX]], i32* %Index, align 4 |
176 | // CHECK: br label %[[END_LABEL]] |
177 | |
178 | #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) |
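// The 64-bit scans have the same shape, but the i64 bit position is truncated
// to i32 before the store, and the reverse scan folds 63 - ctlz into
// "xor ..., 63".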
179 | unsigned char test_BitScanForward64(unsigned long *Index, unsigned __int64 Mask) { |
180 | return _BitScanForward64(Index, Mask); |
181 | } |
182 | // CHECK-ARM-X64: define{{.*}}i8 @test_BitScanForward64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{ |
// CHECK-ARM-X64: [[ISZERO:%[a-z0-9._]+]] = icmp eq i64 %Mask, 0
// CHECK-ARM-X64: br i1 [[ISZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
185 | // CHECK-ARM-X64: [[END_LABEL]]: |
186 | // CHECK-ARM-X64: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ] |
187 | // CHECK-ARM-X64: ret i8 [[RESULT]] |
188 | // CHECK-ARM-X64: [[ISNOTZERO_LABEL]]: |
189 | // CHECK-ARM-X64: [[INDEX:%[0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %Mask, i1 true) |
190 | // CHECK-ARM-X64: [[TRUNC_INDEX:%[0-9]+]] = trunc i64 [[INDEX]] to i32 |
191 | // CHECK-ARM-X64: store i32 [[TRUNC_INDEX]], i32* %Index, align 4 |
192 | // CHECK-ARM-X64: br label %[[END_LABEL]] |
193 | |
194 | unsigned char test_BitScanReverse64(unsigned long *Index, unsigned __int64 Mask) { |
195 | return _BitScanReverse64(Index, Mask); |
196 | } |
197 | // CHECK-ARM-X64: define{{.*}}i8 @test_BitScanReverse64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{ |
// CHECK-ARM-X64: [[ISZERO:%[0-9]+]] = icmp eq i64 %Mask, 0
// CHECK-ARM-X64: br i1 [[ISZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
200 | // CHECK-ARM-X64: [[END_LABEL]]: |
201 | // CHECK-ARM-X64: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ] |
202 | // CHECK-ARM-X64: ret i8 [[RESULT]] |
203 | // CHECK-ARM-X64: [[ISNOTZERO_LABEL]]: |
204 | // CHECK-ARM-X64: [[REVINDEX:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %Mask, i1 true) |
205 | // CHECK-ARM-X64: [[TRUNC_REVINDEX:%[0-9]+]] = trunc i64 [[REVINDEX]] to i32 |
206 | // CHECK-ARM-X64: [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63 |
207 | // CHECK-ARM-X64: store i32 [[INDEX]], i32* %Index, align 4 |
208 | // CHECK-ARM-X64: br label %[[END_LABEL]] |
209 | #endif |
210 | |
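// The pointer-sized exchanges round-trip through a pointer-width integer:
// iPTR binds to i32 or i64 depending on the target and is reused by the
// compare-exchange checks below. The _nf ("no fence") variant differs only in
// using monotonic ordering instead of seq_cst.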
211 | void *test_InterlockedExchangePointer(void * volatile *Target, void *Value) { |
212 | return _InterlockedExchangePointer(Target, Value); |
213 | } |
214 | |
215 | // CHECK: define{{.*}}i8* @test_InterlockedExchangePointer(i8** {{[a-z_ ]*}}%Target, i8* {{[a-z_ ]*}}%Value){{.*}}{ |
216 | // CHECK: %[[TARGET:[0-9]+]] = bitcast i8** %Target to [[iPTR:i[0-9]+]]* |
217 | // CHECK: %[[VALUE:[0-9]+]] = ptrtoint i8* %Value to [[iPTR]] |
218 | // CHECK: %[[EXCHANGE:[0-9]+]] = atomicrmw xchg [[iPTR]]* %[[TARGET]], [[iPTR]] %[[VALUE]] seq_cst |
219 | // CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXCHANGE]] to i8* |
220 | // CHECK: ret i8* %[[RESULT]] |
221 | // CHECK: } |
222 | |
223 | void *test_InterlockedCompareExchangePointer(void * volatile *Destination, |
224 | void *Exchange, void *Comparand) { |
225 | return _InterlockedCompareExchangePointer(Destination, Exchange, Comparand); |
226 | } |
227 | |
228 | // CHECK: define{{.*}}i8* @test_InterlockedCompareExchangePointer(i8** {{[a-z_ ]*}}%Destination, i8* {{[a-z_ ]*}}%Exchange, i8* {{[a-z_ ]*}}%Comparand){{.*}}{ |
229 | // CHECK: %[[DEST:[0-9]+]] = bitcast i8** %Destination to [[iPTR]]* |
230 | // CHECK: %[[EXCHANGE:[0-9]+]] = ptrtoint i8* %Exchange to [[iPTR]] |
231 | // CHECK: %[[COMPARAND:[0-9]+]] = ptrtoint i8* %Comparand to [[iPTR]] |
// CHECK: %[[XCHG:[0-9]+]] = cmpxchg volatile [[iPTR]]* %[[DEST]], [[iPTR]] %[[COMPARAND]], [[iPTR]] %[[EXCHANGE]] seq_cst seq_cst
233 | // CHECK: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0 |
234 | // CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to i8* |
// CHECK: ret i8* %[[RESULT]]
236 | // CHECK: } |
237 | |
238 | void *test_InterlockedCompareExchangePointer_nf(void * volatile *Destination, |
239 | void *Exchange, void *Comparand) { |
240 | return _InterlockedCompareExchangePointer_nf(Destination, Exchange, Comparand); |
241 | } |
242 | |
243 | // CHECK: define{{.*}}i8* @test_InterlockedCompareExchangePointer_nf(i8** {{[a-z_ ]*}}%Destination, i8* {{[a-z_ ]*}}%Exchange, i8* {{[a-z_ ]*}}%Comparand){{.*}}{ |
244 | // CHECK: %[[DEST:[0-9]+]] = bitcast i8** %Destination to [[iPTR]]* |
245 | // CHECK: %[[EXCHANGE:[0-9]+]] = ptrtoint i8* %Exchange to [[iPTR]] |
246 | // CHECK: %[[COMPARAND:[0-9]+]] = ptrtoint i8* %Comparand to [[iPTR]] |
// CHECK: %[[XCHG:[0-9]+]] = cmpxchg volatile [[iPTR]]* %[[DEST]], [[iPTR]] %[[COMPARAND]], [[iPTR]] %[[EXCHANGE]] monotonic monotonic
248 | // CHECK: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0 |
249 | // CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to i8* |
// CHECK: ret i8* %[[RESULT]]
251 | // CHECK: } |
252 | |
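// The unsuffixed Interlocked* operations are fully ordered on every target,
// so each lowers to a single seq_cst atomicrmw (or cmpxchg) at the natural
// width.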
253 | char test_InterlockedExchange8(char volatile *value, char mask) { |
254 | return _InterlockedExchange8(value, mask); |
255 | } |
256 | // CHECK: define{{.*}}i8 @test_InterlockedExchange8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
257 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask seq_cst |
// CHECK: ret i8 [[RESULT]]
259 | // CHECK: } |
260 | |
261 | short test_InterlockedExchange16(short volatile *value, short mask) { |
262 | return _InterlockedExchange16(value, mask); |
263 | } |
264 | // CHECK: define{{.*}}i16 @test_InterlockedExchange16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
265 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask seq_cst |
// CHECK: ret i16 [[RESULT]]
267 | // CHECK: } |
268 | |
269 | long test_InterlockedExchange(long volatile *value, long mask) { |
270 | return _InterlockedExchange(value, mask); |
271 | } |
272 | // CHECK: define{{.*}}i32 @test_InterlockedExchange(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
273 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask seq_cst |
// CHECK: ret i32 [[RESULT]]
275 | // CHECK: } |
276 | |
277 | char test_InterlockedExchangeAdd8(char volatile *value, char mask) { |
278 | return _InterlockedExchangeAdd8(value, mask); |
279 | } |
280 | // CHECK: define{{.*}}i8 @test_InterlockedExchangeAdd8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
281 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask seq_cst |
// CHECK: ret i8 [[RESULT]]
283 | // CHECK: } |
284 | |
285 | short test_InterlockedExchangeAdd16(short volatile *value, short mask) { |
286 | return _InterlockedExchangeAdd16(value, mask); |
287 | } |
288 | // CHECK: define{{.*}}i16 @test_InterlockedExchangeAdd16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
289 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask seq_cst |
// CHECK: ret i16 [[RESULT]]
291 | // CHECK: } |
292 | |
293 | long test_InterlockedExchangeAdd(long volatile *value, long mask) { |
294 | return _InterlockedExchangeAdd(value, mask); |
295 | } |
296 | // CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
297 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask seq_cst |
// CHECK: ret i32 [[RESULT]]
299 | // CHECK: } |
300 | |
301 | char test_InterlockedExchangeSub8(char volatile *value, char mask) { |
302 | return _InterlockedExchangeSub8(value, mask); |
303 | } |
304 | // CHECK: define{{.*}}i8 @test_InterlockedExchangeSub8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
305 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i8* %value, i8 %mask seq_cst |
// CHECK: ret i8 [[RESULT]]
307 | // CHECK: } |
308 | |
309 | short test_InterlockedExchangeSub16(short volatile *value, short mask) { |
310 | return _InterlockedExchangeSub16(value, mask); |
311 | } |
312 | // CHECK: define{{.*}}i16 @test_InterlockedExchangeSub16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
313 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i16* %value, i16 %mask seq_cst |
// CHECK: ret i16 [[RESULT]]
315 | // CHECK: } |
316 | |
317 | long test_InterlockedExchangeSub(long volatile *value, long mask) { |
318 | return _InterlockedExchangeSub(value, mask); |
319 | } |
320 | // CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
321 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i32* %value, i32 %mask seq_cst |
// CHECK: ret i32 [[RESULT]]
323 | // CHECK: } |
324 | |
325 | char test_InterlockedOr8(char volatile *value, char mask) { |
326 | return _InterlockedOr8(value, mask); |
327 | } |
328 | // CHECK: define{{.*}}i8 @test_InterlockedOr8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
329 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask seq_cst |
// CHECK: ret i8 [[RESULT]]
331 | // CHECK: } |
332 | |
333 | short test_InterlockedOr16(short volatile *value, short mask) { |
334 | return _InterlockedOr16(value, mask); |
335 | } |
336 | // CHECK: define{{.*}}i16 @test_InterlockedOr16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
337 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask seq_cst |
// CHECK: ret i16 [[RESULT]]
339 | // CHECK: } |
340 | |
341 | long test_InterlockedOr(long volatile *value, long mask) { |
342 | return _InterlockedOr(value, mask); |
343 | } |
344 | // CHECK: define{{.*}}i32 @test_InterlockedOr(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
345 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask seq_cst |
// CHECK: ret i32 [[RESULT]]
347 | // CHECK: } |
348 | |
349 | char test_InterlockedXor8(char volatile *value, char mask) { |
350 | return _InterlockedXor8(value, mask); |
351 | } |
352 | // CHECK: define{{.*}}i8 @test_InterlockedXor8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
353 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask seq_cst |
// CHECK: ret i8 [[RESULT]]
355 | // CHECK: } |
356 | |
357 | short test_InterlockedXor16(short volatile *value, short mask) { |
358 | return _InterlockedXor16(value, mask); |
359 | } |
360 | // CHECK: define{{.*}}i16 @test_InterlockedXor16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
361 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask seq_cst |
// CHECK: ret i16 [[RESULT]]
363 | // CHECK: } |
364 | |
365 | long test_InterlockedXor(long volatile *value, long mask) { |
366 | return _InterlockedXor(value, mask); |
367 | } |
368 | // CHECK: define{{.*}}i32 @test_InterlockedXor(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
369 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask seq_cst |
// CHECK: ret i32 [[RESULT]]
371 | // CHECK: } |
372 | |
373 | char test_InterlockedAnd8(char volatile *value, char mask) { |
374 | return _InterlockedAnd8(value, mask); |
375 | } |
376 | // CHECK: define{{.*}}i8 @test_InterlockedAnd8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
377 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask seq_cst |
// CHECK: ret i8 [[RESULT]]
379 | // CHECK: } |
380 | |
381 | short test_InterlockedAnd16(short volatile *value, short mask) { |
382 | return _InterlockedAnd16(value, mask); |
383 | } |
384 | // CHECK: define{{.*}}i16 @test_InterlockedAnd16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
385 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask seq_cst |
// CHECK: ret i16 [[RESULT]]
387 | // CHECK: } |
388 | |
389 | long test_InterlockedAnd(long volatile *value, long mask) { |
390 | return _InterlockedAnd(value, mask); |
391 | } |
392 | // CHECK: define{{.*}}i32 @test_InterlockedAnd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
393 | // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask seq_cst |
// CHECK: ret i32 [[RESULT]]
395 | // CHECK: } |
396 | |
397 | char test_InterlockedCompareExchange8(char volatile *Destination, char Exchange, char Comperand) { |
398 | return _InterlockedCompareExchange8(Destination, Exchange, Comperand); |
399 | } |
400 | // CHECK: define{{.*}}i8 @test_InterlockedCompareExchange8(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{ |
401 | // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange seq_cst seq_cst |
402 | // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0 |
403 | // CHECK: ret i8 [[RESULT]] |
404 | // CHECK: } |
405 | |
406 | short test_InterlockedCompareExchange16(short volatile *Destination, short Exchange, short Comperand) { |
407 | return _InterlockedCompareExchange16(Destination, Exchange, Comperand); |
408 | } |
409 | // CHECK: define{{.*}}i16 @test_InterlockedCompareExchange16(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{ |
410 | // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange seq_cst seq_cst |
411 | // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0 |
412 | // CHECK: ret i16 [[RESULT]] |
413 | // CHECK: } |
414 | |
415 | long test_InterlockedCompareExchange(long volatile *Destination, long Exchange, long Comperand) { |
416 | return _InterlockedCompareExchange(Destination, Exchange, Comperand); |
417 | } |
418 | // CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{ |
419 | // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst |
420 | // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0 |
421 | // CHECK: ret i32 [[RESULT]] |
422 | // CHECK: } |
423 | |
424 | __int64 test_InterlockedCompareExchange64(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) { |
425 | return _InterlockedCompareExchange64(Destination, Exchange, Comperand); |
426 | } |
427 | // CHECK: define{{.*}}i64 @test_InterlockedCompareExchange64(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{ |
428 | // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange seq_cst seq_cst |
429 | // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0 |
430 | // CHECK: ret i64 [[RESULT]] |
431 | // CHECK: } |
432 | |
433 | #if defined(__x86_64__) |
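// _InterlockedCompareExchange128 needs cmpxchg16b, hence the explicit
// "-target-feature +cx16" on the x86_64 RUN line. The two 64-bit halves are
// zero-extended and glued into an i128 cmpxchg, the old value is written back
// through ComparandResult, and the success bit is returned widened to i8. The
// pre-increments exercise argument evaluation ahead of the exchange.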
434 | unsigned char test_InterlockedCompareExchange128( |
435 | __int64 volatile *Destination, __int64 ExchangeHigh, |
436 | __int64 ExchangeLow, __int64 *ComparandResult) { |
437 | return _InterlockedCompareExchange128(++Destination, ++ExchangeHigh, |
438 | ++ExchangeLow, ++ComparandResult); |
439 | } |
440 | // CHECK-X64: define{{.*}}i8 @test_InterlockedCompareExchange128(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%ExchangeHigh, i64{{[a-z_ ]*}}%ExchangeLow, i64*{{[a-z_ ]*}}%ComparandResult){{.*}}{ |
441 | // CHECK-X64: %incdec.ptr = getelementptr inbounds i64, i64* %Destination, i64 1 |
442 | // CHECK-X64: %inc = add nsw i64 %ExchangeHigh, 1 |
443 | // CHECK-X64: %inc1 = add nsw i64 %ExchangeLow, 1 |
444 | // CHECK-X64: %incdec.ptr2 = getelementptr inbounds i64, i64* %ComparandResult, i64 1 |
445 | // CHECK-X64: [[DST:%[0-9]+]] = bitcast i64* %incdec.ptr to i128* |
446 | // CHECK-X64: [[EH:%[0-9]+]] = zext i64 %inc to i128 |
447 | // CHECK-X64: [[EL:%[0-9]+]] = zext i64 %inc1 to i128 |
448 | // CHECK-X64: [[CNR:%[0-9]+]] = bitcast i64* %incdec.ptr2 to i128* |
449 | // CHECK-X64: [[EHS:%[0-9]+]] = shl nuw i128 [[EH]], 64 |
450 | // CHECK-X64: [[EXP:%[0-9]+]] = or i128 [[EHS]], [[EL]] |
451 | // CHECK-X64: [[ORG:%[0-9]+]] = load i128, i128* [[CNR]], align 16 |
452 | // CHECK-X64: [[RES:%[0-9]+]] = cmpxchg volatile i128* [[DST]], i128 [[ORG]], i128 [[EXP]] seq_cst seq_cst |
453 | // CHECK-X64: [[OLD:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 0 |
454 | // CHECK-X64: store i128 [[OLD]], i128* [[CNR]], align 16 |
455 | // CHECK-X64: [[SUC1:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 1 |
456 | // CHECK-X64: [[SUC8:%[0-9]+]] = zext i1 [[SUC1]] to i8 |
457 | // CHECK-X64: ret i8 [[SUC8]] |
458 | // CHECK-X64: } |
459 | #endif |
460 | |
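// _InterlockedIncrement/_InterlockedDecrement return the *new* value, while
// atomicrmw yields the old one, so the IR re-applies the +1/-1 after the
// atomic update.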
461 | short test_InterlockedIncrement16(short volatile *Addend) { |
462 | return _InterlockedIncrement16(Addend); |
463 | } |
464 | // CHECK: define{{.*}}i16 @test_InterlockedIncrement16(i16*{{[a-z_ ]*}}%Addend){{.*}}{ |
465 | // CHECK: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 seq_cst |
466 | // CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1 |
467 | // CHECK: ret i16 [[RESULT]] |
468 | // CHECK: } |
469 | |
470 | long test_InterlockedIncrement(long volatile *Addend) { |
471 | return _InterlockedIncrement(Addend); |
472 | } |
473 | // CHECK: define{{.*}}i32 @test_InterlockedIncrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{ |
474 | // CHECK: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 seq_cst |
475 | // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1 |
476 | // CHECK: ret i32 [[RESULT]] |
477 | // CHECK: } |
478 | |
479 | short test_InterlockedDecrement16(short volatile *Addend) { |
480 | return _InterlockedDecrement16(Addend); |
481 | } |
482 | // CHECK: define{{.*}}i16 @test_InterlockedDecrement16(i16*{{[a-z_ ]*}}%Addend){{.*}}{ |
483 | // CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 seq_cst |
484 | // CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1 |
485 | // CHECK: ret i16 [[RESULT]] |
486 | // CHECK: } |
487 | |
488 | long test_InterlockedDecrement(long volatile *Addend) { |
489 | return _InterlockedDecrement(Addend); |
490 | } |
491 | // CHECK: define{{.*}}i32 @test_InterlockedDecrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{ |
492 | // CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 seq_cst |
493 | // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1 |
494 | // CHECK: ret i32 [[RESULT]] |
495 | // CHECK: } |
496 | |
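// The __iso_volatile_* accessors provide plain ISO volatile semantics with no
// implied memory barriers, so they lower to bare volatile loads and stores on
// every target (in contrast to MSVC's /volatile:ms behavior).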
497 | char test_iso_volatile_load8(char volatile *p) { return __iso_volatile_load8(p); } |
498 | short test_iso_volatile_load16(short volatile *p) { return __iso_volatile_load16(p); } |
499 | int test_iso_volatile_load32(int volatile *p) { return __iso_volatile_load32(p); } |
500 | __int64 test_iso_volatile_load64(__int64 volatile *p) { return __iso_volatile_load64(p); } |
501 | |
502 | // CHECK: define{{.*}}i8 @test_iso_volatile_load8(i8*{{[a-z_ ]*}}%p) |
503 | // CHECK: = load volatile i8, i8* %p |
504 | // CHECK: define{{.*}}i16 @test_iso_volatile_load16(i16*{{[a-z_ ]*}}%p) |
505 | // CHECK: = load volatile i16, i16* %p |
506 | // CHECK: define{{.*}}i32 @test_iso_volatile_load32(i32*{{[a-z_ ]*}}%p) |
507 | // CHECK: = load volatile i32, i32* %p |
508 | // CHECK: define{{.*}}i64 @test_iso_volatile_load64(i64*{{[a-z_ ]*}}%p) |
509 | // CHECK: = load volatile i64, i64* %p |
510 | |
511 | void test_iso_volatile_store8(char volatile *p, char v) { __iso_volatile_store8(p, v); } |
512 | void test_iso_volatile_store16(short volatile *p, short v) { __iso_volatile_store16(p, v); } |
513 | void test_iso_volatile_store32(int volatile *p, int v) { __iso_volatile_store32(p, v); } |
514 | void test_iso_volatile_store64(__int64 volatile *p, __int64 v) { __iso_volatile_store64(p, v); } |
515 | |
516 | // CHECK: define{{.*}}void @test_iso_volatile_store8(i8*{{[a-z_ ]*}}%p, i8 {{[a-z_ ]*}}%v) |
517 | // CHECK: store volatile i8 %v, i8* %p |
518 | // CHECK: define{{.*}}void @test_iso_volatile_store16(i16*{{[a-z_ ]*}}%p, i16 {{[a-z_ ]*}}%v) |
519 | // CHECK: store volatile i16 %v, i16* %p |
520 | // CHECK: define{{.*}}void @test_iso_volatile_store32(i32*{{[a-z_ ]*}}%p, i32 {{[a-z_ ]*}}%v) |
521 | // CHECK: store volatile i32 %v, i32* %p |
522 | // CHECK: define{{.*}}void @test_iso_volatile_store64(i64*{{[a-z_ ]*}}%p, i64 {{[a-z_ ]*}}%v) |
523 | // CHECK: store volatile i64 %v, i64* %p |
524 | |
525 | |
526 | #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) |
527 | __int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) { |
528 | return _InterlockedExchange64(value, mask); |
529 | } |
530 | // CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedExchange64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
531 | // CHECK-ARM-X64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask seq_cst |
// CHECK-ARM-X64: ret i64 [[RESULT]]
533 | // CHECK-ARM-X64: } |
534 | |
535 | __int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) { |
536 | return _InterlockedExchangeAdd64(value, mask); |
537 | } |
538 | // CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedExchangeAdd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
539 | // CHECK-ARM-X64: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask seq_cst |
// CHECK-ARM-X64: ret i64 [[RESULT]]
541 | // CHECK-ARM-X64: } |
542 | |
543 | __int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) { |
544 | return _InterlockedExchangeSub64(value, mask); |
545 | } |
546 | // CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedExchangeSub64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
547 | // CHECK-ARM-X64: [[RESULT:%[0-9]+]] = atomicrmw sub i64* %value, i64 %mask seq_cst |
// CHECK-ARM-X64: ret i64 [[RESULT]]
549 | // CHECK-ARM-X64: } |
550 | |
551 | __int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) { |
552 | return _InterlockedOr64(value, mask); |
553 | } |
554 | // CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedOr64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
555 | // CHECK-ARM-X64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask seq_cst |
// CHECK-ARM-X64: ret i64 [[RESULT]]
557 | // CHECK-ARM-X64: } |
558 | |
559 | __int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) { |
560 | return _InterlockedXor64(value, mask); |
561 | } |
562 | // CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedXor64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
563 | // CHECK-ARM-X64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask seq_cst |
// CHECK-ARM-X64: ret i64 [[RESULT]]
565 | // CHECK-ARM-X64: } |
566 | |
567 | __int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) { |
568 | return _InterlockedAnd64(value, mask); |
569 | } |
570 | // CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedAnd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
571 | // CHECK-ARM-X64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask seq_cst |
// CHECK-ARM-X64: ret i64 [[RESULT]]
573 | // CHECK-ARM-X64: } |
574 | |
575 | __int64 test_InterlockedIncrement64(__int64 volatile *Addend) { |
576 | return _InterlockedIncrement64(Addend); |
577 | } |
578 | // CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedIncrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{ |
579 | // CHECK-ARM-X64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 seq_cst |
580 | // CHECK-ARM-X64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1 |
581 | // CHECK-ARM-X64: ret i64 [[RESULT]] |
582 | // CHECK-ARM-X64: } |
583 | |
584 | __int64 test_InterlockedDecrement64(__int64 volatile *Addend) { |
585 | return _InterlockedDecrement64(Addend); |
586 | } |
587 | // CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedDecrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{ |
588 | // CHECK-ARM-X64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 seq_cst |
589 | // CHECK-ARM-X64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1 |
590 | // CHECK-ARM-X64: ret i64 [[RESULT]] |
591 | // CHECK-ARM-X64: } |
592 | |
593 | #endif |
594 | |
595 | #if defined(__i386__) || defined(__x86_64__) |
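// The HLE variants prepend the XACQUIRE (0xf2) or XRELEASE (0xf3) prefix as a
// raw .byte ahead of the locked instruction, presumably to avoid depending on
// assembler support for the xacquire/xrelease mnemonics.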
596 | long test_InterlockedExchange_HLEAcquire(long volatile *Target, long Value) { |
597 | // CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLEAcquire(i32*{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value) |
598 | // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; xchg $0, $1", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Target, i32 %Value, i32* %Target) |
599 | return _InterlockedExchange_HLEAcquire(Target, Value); |
600 | } |
601 | long test_InterlockedExchange_HLERelease(long volatile *Target, long Value) { |
602 | // CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLERelease(i32*{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value) |
603 | // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; xchg $0, $1", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Target, i32 %Value, i32* %Target) |
604 | return _InterlockedExchange_HLERelease(Target, Value); |
605 | } |
606 | long test_InterlockedCompareExchange_HLEAcquire(long volatile *Destination, |
607 | long Exchange, long Comparand) { |
608 | // CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLEAcquire(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand) |
609 | // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Destination, i32 %Exchange, i32 %Comparand, i32* %Destination) |
610 | return _InterlockedCompareExchange_HLEAcquire(Destination, Exchange, Comparand); |
611 | } |
612 | long test_InterlockedCompareExchange_HLERelease(long volatile *Destination, |
613 | long Exchange, long Comparand) { |
614 | // CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLERelease(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand) |
615 | // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Destination, i32 %Exchange, i32 %Comparand, i32* %Destination) |
616 | return _InterlockedCompareExchange_HLERelease(Destination, Exchange, Comparand); |
617 | } |
618 | #endif |
619 | #if defined(__x86_64__) |
620 | __int64 test_InterlockedExchange64_HLEAcquire(__int64 volatile *Target, __int64 Value) { |
621 | // CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLEAcquire(i64*{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value) |
622 | // CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; xchg $0, $1", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Target, i64 %Value, i64* %Target) |
623 | return _InterlockedExchange64_HLEAcquire(Target, Value); |
624 | } |
625 | __int64 test_InterlockedExchange64_HLERelease(__int64 volatile *Target, __int64 Value) { |
626 | // CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLERelease(i64*{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value) |
627 | // CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; xchg $0, $1", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Target, i64 %Value, i64* %Target) |
628 | return _InterlockedExchange64_HLERelease(Target, Value); |
629 | } |
630 | __int64 test_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *Destination, |
631 | __int64 Exchange, __int64 Comparand) { |
632 | // CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLEAcquire(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand) |
633 | // CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Destination, i64 %Exchange, i64 %Comparand, i64* %Destination) |
634 | return _InterlockedCompareExchange64_HLEAcquire(Destination, Exchange, Comparand); |
635 | } |
636 | __int64 test_InterlockedCompareExchange64_HLERelease(__int64 volatile *Destination, |
637 | __int64 Exchange, __int64 Comparand) { |
638 | // CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLERelease(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand) |
639 | // CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Destination, i64 %Exchange, i64 %Comparand, i64* %Destination) |
640 | return _InterlockedCompareExchange64_HLERelease(Destination, Exchange, Comparand); |
641 | } |
642 | #endif |
643 | |
644 | #if defined(__arm__) || defined(__aarch64__) |
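// On ARM and AArch64 each intrinsic also comes in three weaker flavors whose
// suffix maps directly onto an LLVM atomic ordering: _acq -> acquire,
// _rel -> release, and _nf ("no fence") -> monotonic. For the compare-exchange
// forms, _rel pairs a release success ordering with a monotonic failure
// ordering.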
645 | char test_InterlockedExchangeAdd8_acq(char volatile *value, char mask) { |
646 | return _InterlockedExchangeAdd8_acq(value, mask); |
647 | } |
648 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
649 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask acquire |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
651 | // CHECK-ARM-ARM64: } |
652 | char test_InterlockedExchangeAdd8_rel(char volatile *value, char mask) { |
653 | return _InterlockedExchangeAdd8_rel(value, mask); |
654 | } |
655 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
656 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask release |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
658 | // CHECK-ARM-ARM64: } |
659 | char test_InterlockedExchangeAdd8_nf(char volatile *value, char mask) { |
660 | return _InterlockedExchangeAdd8_nf(value, mask); |
661 | } |
662 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
663 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask monotonic |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
665 | // CHECK-ARM-ARM64: } |
666 | short test_InterlockedExchangeAdd16_acq(short volatile *value, short mask) { |
667 | return _InterlockedExchangeAdd16_acq(value, mask); |
668 | } |
669 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
670 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask acquire |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
672 | // CHECK-ARM-ARM64: } |
673 | short test_InterlockedExchangeAdd16_rel(short volatile *value, short mask) { |
674 | return _InterlockedExchangeAdd16_rel(value, mask); |
675 | } |
676 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
677 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask release |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
679 | // CHECK-ARM-ARM64: } |
680 | short test_InterlockedExchangeAdd16_nf(short volatile *value, short mask) { |
681 | return _InterlockedExchangeAdd16_nf(value, mask); |
682 | } |
683 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
684 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask monotonic |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
686 | // CHECK-ARM-ARM64: } |
687 | long test_InterlockedExchangeAdd_acq(long volatile *value, long mask) { |
688 | return _InterlockedExchangeAdd_acq(value, mask); |
689 | } |
690 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
691 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask acquire |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
693 | // CHECK-ARM-ARM64: } |
694 | long test_InterlockedExchangeAdd_rel(long volatile *value, long mask) { |
695 | return _InterlockedExchangeAdd_rel(value, mask); |
696 | } |
697 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
698 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask release |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
700 | // CHECK-ARM-ARM64: } |
701 | long test_InterlockedExchangeAdd_nf(long volatile *value, long mask) { |
702 | return _InterlockedExchangeAdd_nf(value, mask); |
703 | } |
704 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
705 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask monotonic |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
707 | // CHECK-ARM-ARM64: } |
708 | __int64 test_InterlockedExchangeAdd64_acq(__int64 volatile *value, __int64 mask) { |
709 | return _InterlockedExchangeAdd64_acq(value, mask); |
710 | } |
711 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
712 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask acquire |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
714 | // CHECK-ARM-ARM64: } |
715 | __int64 test_InterlockedExchangeAdd64_rel(__int64 volatile *value, __int64 mask) { |
716 | return _InterlockedExchangeAdd64_rel(value, mask); |
717 | } |
718 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
719 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask release |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
721 | // CHECK-ARM-ARM64: } |
722 | __int64 test_InterlockedExchangeAdd64_nf(__int64 volatile *value, __int64 mask) { |
723 | return _InterlockedExchangeAdd64_nf(value, mask); |
724 | } |
725 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
726 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask monotonic |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
728 | // CHECK-ARM-ARM64: } |
729 | |
730 | char test_InterlockedExchange8_acq(char volatile *value, char mask) { |
731 | return _InterlockedExchange8_acq(value, mask); |
732 | } |
733 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
734 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask acquire |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
736 | // CHECK-ARM-ARM64: } |
737 | char test_InterlockedExchange8_rel(char volatile *value, char mask) { |
738 | return _InterlockedExchange8_rel(value, mask); |
739 | } |
740 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
741 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask release |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
743 | // CHECK-ARM-ARM64: } |
744 | char test_InterlockedExchange8_nf(char volatile *value, char mask) { |
745 | return _InterlockedExchange8_nf(value, mask); |
746 | } |
747 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
748 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask monotonic |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
750 | // CHECK-ARM-ARM64: } |
751 | short test_InterlockedExchange16_acq(short volatile *value, short mask) { |
752 | return _InterlockedExchange16_acq(value, mask); |
753 | } |
754 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
755 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask acquire |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
757 | // CHECK-ARM-ARM64: } |
758 | short test_InterlockedExchange16_rel(short volatile *value, short mask) { |
759 | return _InterlockedExchange16_rel(value, mask); |
760 | } |
761 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
762 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask release |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
764 | // CHECK-ARM-ARM64: } |
765 | short test_InterlockedExchange16_nf(short volatile *value, short mask) { |
766 | return _InterlockedExchange16_nf(value, mask); |
767 | } |
768 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
769 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask monotonic |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
771 | // CHECK-ARM-ARM64: } |
772 | long test_InterlockedExchange_acq(long volatile *value, long mask) { |
773 | return _InterlockedExchange_acq(value, mask); |
774 | } |
775 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
776 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask acquire |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
778 | // CHECK-ARM-ARM64: } |
779 | long test_InterlockedExchange_rel(long volatile *value, long mask) { |
780 | return _InterlockedExchange_rel(value, mask); |
781 | } |
782 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
783 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask release |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
785 | // CHECK-ARM-ARM64: } |
786 | long test_InterlockedExchange_nf(long volatile *value, long mask) { |
787 | return _InterlockedExchange_nf(value, mask); |
788 | } |
789 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
790 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask monotonic |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
792 | // CHECK-ARM-ARM64: } |
793 | __int64 test_InterlockedExchange64_acq(__int64 volatile *value, __int64 mask) { |
794 | return _InterlockedExchange64_acq(value, mask); |
795 | } |
796 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
797 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask acquire |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
799 | // CHECK-ARM-ARM64: } |
800 | __int64 test_InterlockedExchange64_rel(__int64 volatile *value, __int64 mask) { |
801 | return _InterlockedExchange64_rel(value, mask); |
802 | } |
803 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
804 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask release |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
806 | // CHECK-ARM-ARM64: } |
807 | __int64 test_InterlockedExchange64_nf(__int64 volatile *value, __int64 mask) { |
808 | return _InterlockedExchange64_nf(value, mask); |
809 | } |
810 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
811 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask monotonic |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
813 | // CHECK-ARM-ARM64: } |
814 | |
815 | char test_InterlockedCompareExchange8_acq(char volatile *Destination, char Exchange, char Comperand) { |
816 | return _InterlockedCompareExchange8_acq(Destination, Exchange, Comperand); |
817 | } |
818 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_acq(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{ |
819 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange acquire acquire |
820 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0 |
821 | // CHECK-ARM-ARM64: ret i8 [[RESULT]] |
822 | // CHECK-ARM-ARM64: } |
823 | |
824 | char test_InterlockedCompareExchange8_rel(char volatile *Destination, char Exchange, char Comperand) { |
825 | return _InterlockedCompareExchange8_rel(Destination, Exchange, Comperand); |
826 | } |
827 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_rel(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{ |
828 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange release monotonic |
829 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0 |
830 | // CHECK-ARM-ARM64: ret i8 [[RESULT]] |
831 | // CHECK-ARM-ARM64: } |
832 | |
833 | char test_InterlockedCompareExchange8_nf(char volatile *Destination, char Exchange, char Comperand) { |
834 | return _InterlockedCompareExchange8_nf(Destination, Exchange, Comperand); |
835 | } |
836 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_nf(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{ |
837 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange monotonic monotonic |
838 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0 |
839 | // CHECK-ARM-ARM64: ret i8 [[RESULT]] |
840 | // CHECK-ARM-ARM64: } |
841 | |
842 | short test_InterlockedCompareExchange16_acq(short volatile *Destination, short Exchange, short Comperand) { |
843 | return _InterlockedCompareExchange16_acq(Destination, Exchange, Comperand); |
844 | } |
845 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_acq(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{ |
846 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange acquire acquire |
847 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0 |
848 | // CHECK-ARM-ARM64: ret i16 [[RESULT]] |
849 | // CHECK-ARM-ARM64: } |
850 | |
851 | short test_InterlockedCompareExchange16_rel(short volatile *Destination, short Exchange, short Comperand) { |
852 | return _InterlockedCompareExchange16_rel(Destination, Exchange, Comperand); |
853 | } |
854 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_rel(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{ |
855 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange release monotonic |
856 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0 |
857 | // CHECK-ARM-ARM64: ret i16 [[RESULT]] |
858 | // CHECK-ARM-ARM64: } |
859 | |
860 | short test_InterlockedCompareExchange16_nf(short volatile *Destination, short Exchange, short Comperand) { |
861 | return _InterlockedCompareExchange16_nf(Destination, Exchange, Comperand); |
862 | } |
863 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_nf(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{ |
864 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange monotonic monotonic |
865 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0 |
866 | // CHECK-ARM-ARM64: ret i16 [[RESULT]] |
867 | // CHECK-ARM-ARM64: } |
868 | |
869 | long test_InterlockedCompareExchange_acq(long volatile *Destination, long Exchange, long Comperand) { |
870 | return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand); |
871 | } |
872 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_acq(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{ |
873 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange acquire acquire |
874 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0 |
875 | // CHECK-ARM-ARM64: ret i32 [[RESULT]] |
876 | // CHECK-ARM-ARM64: } |
877 | |
878 | long test_InterlockedCompareExchange_rel(long volatile *Destination, long Exchange, long Comperand) { |
879 | return _InterlockedCompareExchange_rel(Destination, Exchange, Comperand); |
880 | } |
881 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_rel(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{ |
882 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange release monotonic |
883 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0 |
884 | // CHECK-ARM-ARM64: ret i32 [[RESULT]] |
885 | // CHECK-ARM-ARM64: } |
886 | |
887 | long test_InterlockedCompareExchange_nf(long volatile *Destination, long Exchange, long Comperand) { |
888 | return _InterlockedCompareExchange_nf(Destination, Exchange, Comperand); |
889 | } |
890 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_nf(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{ |
891 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic |
892 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0 |
893 | // CHECK-ARM-ARM64: ret i32 [[RESULT]] |
894 | // CHECK-ARM-ARM64: } |
895 | |
896 | __int64 test_InterlockedCompareExchange64_acq(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) { |
897 | return _InterlockedCompareExchange64_acq(Destination, Exchange, Comperand); |
898 | } |
899 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_acq(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{ |
900 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange acquire acquire |
901 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0 |
902 | // CHECK-ARM-ARM64: ret i64 [[RESULT]] |
903 | // CHECK-ARM-ARM64: } |
904 | |
905 | __int64 test_InterlockedCompareExchange64_rel(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) { |
906 | return _InterlockedCompareExchange64_rel(Destination, Exchange, Comperand); |
907 | } |
908 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_rel(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{ |
909 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange release monotonic |
910 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0 |
911 | // CHECK-ARM-ARM64: ret i64 [[RESULT]] |
912 | // CHECK-ARM-ARM64: } |
913 | |
914 | __int64 test_InterlockedCompareExchange64_nf(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) { |
915 | return _InterlockedCompareExchange64_nf(Destination, Exchange, Comperand); |
916 | } |
917 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_nf(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{ |
918 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange monotonic monotonic |
919 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0 |
920 | // CHECK-ARM-ARM64: ret i64 [[RESULT]] |
921 | // CHECK-ARM-ARM64: } |
922 | |
923 | char test_InterlockedOr8_acq(char volatile *value, char mask) { |
924 | return _InterlockedOr8_acq(value, mask); |
925 | } |
926 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
927 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask acquire |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
929 | // CHECK-ARM-ARM64: } |
930 | |
931 | char test_InterlockedOr8_rel(char volatile *value, char mask) { |
932 | return _InterlockedOr8_rel(value, mask); |
933 | } |
934 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
935 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask release |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
937 | // CHECK-ARM-ARM64: } |
938 | |
939 | char test_InterlockedOr8_nf(char volatile *value, char mask) { |
940 | return _InterlockedOr8_nf(value, mask); |
941 | } |
942 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
943 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask monotonic |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
945 | // CHECK-ARM-ARM64: } |
946 | |
947 | short test_InterlockedOr16_acq(short volatile *value, short mask) { |
948 | return _InterlockedOr16_acq(value, mask); |
949 | } |
950 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
951 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask acquire |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
953 | // CHECK-ARM-ARM64: } |
954 | |
955 | short test_InterlockedOr16_rel(short volatile *value, short mask) { |
956 | return _InterlockedOr16_rel(value, mask); |
957 | } |
958 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
959 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask release |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
961 | // CHECK-ARM-ARM64: } |
962 | |
963 | short test_InterlockedOr16_nf(short volatile *value, short mask) { |
964 | return _InterlockedOr16_nf(value, mask); |
965 | } |
966 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
967 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask monotonic |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
969 | // CHECK-ARM-ARM64: } |
970 | |
971 | long test_InterlockedOr_acq(long volatile *value, long mask) { |
972 | return _InterlockedOr_acq(value, mask); |
973 | } |
974 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
975 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask acquire |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
977 | // CHECK-ARM-ARM64: } |
978 | |
979 | long test_InterlockedOr_rel(long volatile *value, long mask) { |
980 | return _InterlockedOr_rel(value, mask); |
981 | } |
982 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
983 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask release |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
985 | // CHECK-ARM-ARM64: } |
986 | |
987 | long test_InterlockedOr_nf(long volatile *value, long mask) { |
988 | return _InterlockedOr_nf(value, mask); |
989 | } |
990 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
991 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask monotonic |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
993 | // CHECK-ARM-ARM64: } |
994 | |
995 | __int64 test_InterlockedOr64_acq(__int64 volatile *value, __int64 mask) { |
996 | return _InterlockedOr64_acq(value, mask); |
997 | } |
998 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
999 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask acquire |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
1001 | // CHECK-ARM-ARM64: } |
1002 | |
1003 | __int64 test_InterlockedOr64_rel(__int64 volatile *value, __int64 mask) { |
1004 | return _InterlockedOr64_rel(value, mask); |
1005 | } |
1006 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
1007 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask release |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
1009 | // CHECK-ARM-ARM64: } |
1010 | |
1011 | __int64 test_InterlockedOr64_nf(__int64 volatile *value, __int64 mask) { |
1012 | return _InterlockedOr64_nf(value, mask); |
1013 | } |
1014 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
1015 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask monotonic |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
1017 | // CHECK-ARM-ARM64: } |
1018 | |
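// The Xor variants mirror the Or tests above, lowering to atomicrmw xor with
// the suffix-selected ordering.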
1019 | char test_InterlockedXor8_acq(char volatile *value, char mask) { |
1020 | return _InterlockedXor8_acq(value, mask); |
1021 | } |
1022 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
1023 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask acquire |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
1025 | // CHECK-ARM-ARM64: } |
1026 | |
1027 | char test_InterlockedXor8_rel(char volatile *value, char mask) { |
1028 | return _InterlockedXor8_rel(value, mask); |
1029 | } |
1030 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
1031 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask release |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
1033 | // CHECK-ARM-ARM64: } |
1034 | |
1035 | char test_InterlockedXor8_nf(char volatile *value, char mask) { |
1036 | return _InterlockedXor8_nf(value, mask); |
1037 | } |
1038 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
1039 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask monotonic |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
1041 | // CHECK-ARM-ARM64: } |
1042 | |
1043 | short test_InterlockedXor16_acq(short volatile *value, short mask) { |
1044 | return _InterlockedXor16_acq(value, mask); |
1045 | } |
1046 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
1047 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask acquire |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
1049 | // CHECK-ARM-ARM64: } |
1050 | |
1051 | short test_InterlockedXor16_rel(short volatile *value, short mask) { |
1052 | return _InterlockedXor16_rel(value, mask); |
1053 | } |
1054 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
1055 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask release |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
1057 | // CHECK-ARM-ARM64: } |
1058 | |
1059 | short test_InterlockedXor16_nf(short volatile *value, short mask) { |
1060 | return _InterlockedXor16_nf(value, mask); |
1061 | } |
1062 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
1063 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask monotonic |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
1065 | // CHECK-ARM-ARM64: } |
1066 | |
1067 | long test_InterlockedXor_acq(long volatile *value, long mask) { |
1068 | return _InterlockedXor_acq(value, mask); |
1069 | } |
1070 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
1071 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask acquire |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
1073 | // CHECK-ARM-ARM64: } |
1074 | |
1075 | long test_InterlockedXor_rel(long volatile *value, long mask) { |
1076 | return _InterlockedXor_rel(value, mask); |
1077 | } |
1078 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
1079 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask release |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
1081 | // CHECK-ARM-ARM64: } |
1082 | |
1083 | long test_InterlockedXor_nf(long volatile *value, long mask) { |
1084 | return _InterlockedXor_nf(value, mask); |
1085 | } |
1086 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
1087 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask monotonic |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
1089 | // CHECK-ARM-ARM64: } |
1090 | |
1091 | __int64 test_InterlockedXor64_acq(__int64 volatile *value, __int64 mask) { |
1092 | return _InterlockedXor64_acq(value, mask); |
1093 | } |
1094 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
1095 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask acquire |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
1097 | // CHECK-ARM-ARM64: } |
1098 | |
1099 | __int64 test_InterlockedXor64_rel(__int64 volatile *value, __int64 mask) { |
1100 | return _InterlockedXor64_rel(value, mask); |
1101 | } |
1102 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
1103 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask release |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
1105 | // CHECK-ARM-ARM64: } |
1106 | |
1107 | __int64 test_InterlockedXor64_nf(__int64 volatile *value, __int64 mask) { |
1108 | return _InterlockedXor64_nf(value, mask); |
1109 | } |
1110 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
1111 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask monotonic |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
1113 | // CHECK-ARM-ARM64: } |
1114 | |
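// The And variants likewise lower to atomicrmw and.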
1115 | char test_InterlockedAnd8_acq(char volatile *value, char mask) { |
1116 | return _InterlockedAnd8_acq(value, mask); |
1117 | } |
1118 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
1119 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask acquire |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
1121 | // CHECK-ARM-ARM64: } |
1122 | |
1123 | char test_InterlockedAnd8_rel(char volatile *value, char mask) { |
1124 | return _InterlockedAnd8_rel(value, mask); |
1125 | } |
1126 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
1127 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask release |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
1129 | // CHECK-ARM-ARM64: } |
1130 | |
1131 | char test_InterlockedAnd8_nf(char volatile *value, char mask) { |
1132 | return _InterlockedAnd8_nf(value, mask); |
1133 | } |
1134 | // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ |
1135 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask monotonic |
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
1137 | // CHECK-ARM-ARM64: } |
1138 | |
1139 | short test_InterlockedAnd16_acq(short volatile *value, short mask) { |
1140 | return _InterlockedAnd16_acq(value, mask); |
1141 | } |
1142 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
1143 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask acquire |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
1145 | // CHECK-ARM-ARM64: } |
1146 | |
1147 | short test_InterlockedAnd16_rel(short volatile *value, short mask) { |
1148 | return _InterlockedAnd16_rel(value, mask); |
1149 | } |
1150 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
1151 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask release |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
1153 | // CHECK-ARM-ARM64: } |
1154 | |
1155 | short test_InterlockedAnd16_nf(short volatile *value, short mask) { |
1156 | return _InterlockedAnd16_nf(value, mask); |
1157 | } |
1158 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ |
1159 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask monotonic |
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
1161 | // CHECK-ARM-ARM64: } |
1162 | |
1163 | long test_InterlockedAnd_acq(long volatile *value, long mask) { |
1164 | return _InterlockedAnd_acq(value, mask); |
1165 | } |
1166 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
1167 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask acquire |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
1169 | // CHECK-ARM-ARM64: } |
1170 | |
1171 | long test_InterlockedAnd_rel(long volatile *value, long mask) { |
1172 | return _InterlockedAnd_rel(value, mask); |
1173 | } |
1174 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
1175 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask release |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
1177 | // CHECK-ARM-ARM64: } |
1178 | |
1179 | long test_InterlockedAnd_nf(long volatile *value, long mask) { |
1180 | return _InterlockedAnd_nf(value, mask); |
1181 | } |
1182 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ |
1183 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask monotonic |
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
1185 | // CHECK-ARM-ARM64: } |
1186 | |
1187 | __int64 test_InterlockedAnd64_acq(__int64 volatile *value, __int64 mask) { |
1188 | return _InterlockedAnd64_acq(value, mask); |
1189 | } |
1190 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
1191 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask acquire |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
1193 | // CHECK-ARM-ARM64: } |
1194 | |
1195 | __int64 test_InterlockedAnd64_rel(__int64 volatile *value, __int64 mask) { |
1196 | return _InterlockedAnd64_rel(value, mask); |
1197 | } |
1198 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
1199 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask release |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
1201 | // CHECK-ARM-ARM64: } |
1202 | |
1203 | __int64 test_InterlockedAnd64_nf(__int64 volatile *value, __int64 mask) { |
1204 | return _InterlockedAnd64_nf(value, mask); |
1205 | } |
1206 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ |
1207 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask monotonic |
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
1209 | // CHECK-ARM-ARM64: } |
1210 | |
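// Increment lowers to an atomicrmw add of 1 followed by a non-atomic add of 1:
// the intrinsic returns the incremented value, while atomicrmw yields the
// original one.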
1211 | short test_InterlockedIncrement16_acq(short volatile *Addend) { |
1212 | return _InterlockedIncrement16_acq(Addend); |
1213 | } |
1214 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_acq(i16*{{[a-z_ ]*}}%Addend){{.*}}{ |
1215 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 acquire |
1216 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1 |
1217 | // CHECK-ARM-ARM64: ret i16 [[RESULT]] |
1218 | // CHECK-ARM-ARM64: } |
1219 | |
1220 | short test_InterlockedIncrement16_rel(short volatile *Addend) { |
1221 | return _InterlockedIncrement16_rel(Addend); |
1222 | } |
1223 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_rel(i16*{{[a-z_ ]*}}%Addend){{.*}}{ |
1224 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 release |
1225 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1 |
1226 | // CHECK-ARM-ARM64: ret i16 [[RESULT]] |
1227 | // CHECK-ARM-ARM64: } |
1228 | |
1229 | short test_InterlockedIncrement16_nf(short volatile *Addend) { |
1230 | return _InterlockedIncrement16_nf(Addend); |
1231 | } |
1232 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_nf(i16*{{[a-z_ ]*}}%Addend){{.*}}{ |
1233 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 monotonic |
1234 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1 |
1235 | // CHECK-ARM-ARM64: ret i16 [[RESULT]] |
1236 | // CHECK-ARM-ARM64: } |
1237 | |
1238 | long test_InterlockedIncrement_acq(long volatile *Addend) { |
1239 | return _InterlockedIncrement_acq(Addend); |
1240 | } |
1241 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{ |
1242 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 acquire |
1243 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1 |
1244 | // CHECK-ARM-ARM64: ret i32 [[RESULT]] |
1245 | // CHECK-ARM-ARM64: } |
1246 | |
1247 | long test_InterlockedIncrement_rel(long volatile *Addend) { |
1248 | return _InterlockedIncrement_rel(Addend); |
1249 | } |
1250 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{ |
1251 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 release |
1252 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1 |
1253 | // CHECK-ARM-ARM64: ret i32 [[RESULT]] |
1254 | // CHECK-ARM-ARM64: } |
1255 | |
1256 | long test_InterlockedIncrement_nf(long volatile *Addend) { |
1257 | return _InterlockedIncrement_nf(Addend); |
1258 | } |
1259 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{ |
1260 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 monotonic |
1261 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1 |
1262 | // CHECK-ARM-ARM64: ret i32 [[RESULT]] |
1263 | // CHECK-ARM-ARM64: } |
1264 | |
1265 | __int64 test_InterlockedIncrement64_acq(__int64 volatile *Addend) { |
1266 | return _InterlockedIncrement64_acq(Addend); |
1267 | } |
1268 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_acq(i64*{{[a-z_ ]*}}%Addend){{.*}}{ |
1269 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 acquire |
1270 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1 |
1271 | // CHECK-ARM-ARM64: ret i64 [[RESULT]] |
1272 | // CHECK-ARM-ARM64: } |
1273 | |
1274 | __int64 test_InterlockedIncrement64_rel(__int64 volatile *Addend) { |
1275 | return _InterlockedIncrement64_rel(Addend); |
1276 | } |
1277 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_rel(i64*{{[a-z_ ]*}}%Addend){{.*}}{ |
1278 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 release |
1279 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1 |
1280 | // CHECK-ARM-ARM64: ret i64 [[RESULT]] |
1281 | // CHECK-ARM-ARM64: } |
1282 | |
1283 | __int64 test_InterlockedIncrement64_nf(__int64 volatile *Addend) { |
1284 | return _InterlockedIncrement64_nf(Addend); |
1285 | } |
1286 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_nf(i64*{{[a-z_ ]*}}%Addend){{.*}}{ |
1287 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 monotonic |
1288 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1 |
1289 | // CHECK-ARM-ARM64: ret i64 [[RESULT]] |
1290 | // CHECK-ARM-ARM64: } |
1291 | |
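// Decrement is symmetric: atomicrmw sub of 1, then add of -1 to produce the
// decremented value.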
1292 | short test_InterlockedDecrement16_acq(short volatile *Addend) { |
1293 | return _InterlockedDecrement16_acq(Addend); |
1294 | } |
1295 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_acq(i16*{{[a-z_ ]*}}%Addend){{.*}}{ |
1296 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 acquire |
1297 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1 |
1298 | // CHECK-ARM-ARM64: ret i16 [[RESULT]] |
1299 | // CHECK-ARM-ARM64: } |
1300 | |
1301 | short test_InterlockedDecrement16_rel(short volatile *Addend) { |
1302 | return _InterlockedDecrement16_rel(Addend); |
1303 | } |
1304 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_rel(i16*{{[a-z_ ]*}}%Addend){{.*}}{ |
1305 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 release |
1306 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1 |
1307 | // CHECK-ARM-ARM64: ret i16 [[RESULT]] |
1308 | // CHECK-ARM-ARM64: } |
1309 | |
1310 | short test_InterlockedDecrement16_nf(short volatile *Addend) { |
1311 | return _InterlockedDecrement16_nf(Addend); |
1312 | } |
1313 | // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_nf(i16*{{[a-z_ ]*}}%Addend){{.*}}{ |
1314 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 monotonic |
1315 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1 |
1316 | // CHECK-ARM-ARM64: ret i16 [[RESULT]] |
1317 | // CHECK-ARM-ARM64: } |
1318 | |
1319 | long test_InterlockedDecrement_acq(long volatile *Addend) { |
1320 | return _InterlockedDecrement_acq(Addend); |
1321 | } |
1322 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{ |
1323 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 acquire |
1324 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1 |
1325 | // CHECK-ARM-ARM64: ret i32 [[RESULT]] |
1326 | // CHECK-ARM-ARM64: } |
1327 | |
1328 | long test_InterlockedDecrement_rel(long volatile *Addend) { |
1329 | return _InterlockedDecrement_rel(Addend); |
1330 | } |
1331 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{ |
1332 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 release |
1333 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1 |
1334 | // CHECK-ARM-ARM64: ret i32 [[RESULT]] |
1335 | // CHECK-ARM-ARM64: } |
1336 | |
1337 | long test_InterlockedDecrement_nf(long volatile *Addend) { |
1338 | return _InterlockedDecrement_nf(Addend); |
1339 | } |
1340 | // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{ |
1341 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 monotonic |
1342 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1 |
1343 | // CHECK-ARM-ARM64: ret i32 [[RESULT]] |
1344 | // CHECK-ARM-ARM64: } |
1345 | |
1346 | __int64 test_InterlockedDecrement64_acq(__int64 volatile *Addend) { |
1347 | return _InterlockedDecrement64_acq(Addend); |
1348 | } |
1349 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_acq(i64*{{[a-z_ ]*}}%Addend){{.*}}{ |
1350 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 acquire |
1351 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1 |
1352 | // CHECK-ARM-ARM64: ret i64 [[RESULT]] |
1353 | // CHECK-ARM-ARM64: } |
1354 | |
1355 | __int64 test_InterlockedDecrement64_rel(__int64 volatile *Addend) { |
1356 | return _InterlockedDecrement64_rel(Addend); |
1357 | } |
1358 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_rel(i64*{{[a-z_ ]*}}%Addend){{.*}}{ |
1359 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 release |
1360 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1 |
1361 | // CHECK-ARM-ARM64: ret i64 [[RESULT]] |
1362 | // CHECK-ARM-ARM64: } |
1363 | |
1364 | __int64 test_InterlockedDecrement64_nf(__int64 volatile *Addend) { |
1365 | return _InterlockedDecrement64_nf(Addend); |
1366 | } |
1367 | // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_nf(i64*{{[a-z_ ]*}}%Addend){{.*}}{ |
1368 | // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 monotonic |
1369 | // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1 |
1370 | // CHECK-ARM-ARM64: ret i64 [[RESULT]] |
1371 | // CHECK-ARM-ARM64: } |
1372 | #endif |
1373 | |
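// __fastfail terminates the process via a platform-specific trap that carries
// the failure code in a register: "int $$0x29" with the code in ecx/rcx on
// x86, "udf #251" with r0 on ARM, and "brk #0xF003" with w0 on ARM64. The
// inline asm is marked noreturn, which the final CHECK below verifies.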
void test__fastfail(void) {
1375 | __fastfail(42); |
1376 | } |
1377 | // CHECK-LABEL: define{{.*}} void @test__fastfail() |
1378 | // CHECK-ARM: call void asm sideeffect "udf #251", "{r0}"(i32 42) #[[NORETURN:[0-9]+]] |
// CHECK-INTEL: call void asm sideeffect "int $$0x29", "{cx}"(i32 42) #[[NORETURN:[0-9]+]]
1380 | // CHECK-ARM64: call void asm sideeffect "brk #0xF003", "{w0}"(i32 42) #[[NORETURN:[0-9]+]] |
1381 | |
// Attribute groups come last in the IR output, so the noreturn group
// referenced by the calls above is checked here.
1383 | |
1384 | // CHECK: attributes #[[NORETURN]] = { noreturn{{.*}} } |
1385 | |