// RUN: %clang_cc1 -ffreestanding -fms-extensions \
// RUN: -triple x86_64--darwin -Oz -emit-llvm %s -o - \
// RUN: | FileCheck %s
// RUN: %clang_cc1 -ffreestanding -fms-extensions \
// RUN: -triple x86_64--linux -Oz -emit-llvm %s -o - \
// RUN: | FileCheck %s

// MS 'LONG' is always 32 bits. On LP64 targets 'long' is 64 bits, so the MS
// intrinsics (-fms-extensions) are declared with 'int' there.
#ifdef __LP64__
#define LONG int
#else
#define LONG long
#endif

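// _BitScanForward writes the index of the lowest set bit of Mask to *Index
// and returns nonzero; it returns 0 when Mask is 0. The expected lowering is
// a zero test branching around an llvm.cttz call.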
unsigned char test_BitScanForward(unsigned LONG *Index, unsigned LONG Mask) {
  return _BitScanForward(Index, Mask);
}
// CHECK: define{{.*}}i8 @test_BitScanForward(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK: [[ISZERO:%[a-z0-9._]+]] = icmp eq i32 %Mask, 0
// CHECK: br i1 [[ISZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK: [[END_LABEL]]:
// CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK: ret i8 [[RESULT]]
// CHECK: [[ISNOTZERO_LABEL]]:
// CHECK: [[INDEX:%[0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %Mask, i1 true)
// CHECK: store i32 [[INDEX]], i32* %Index, align 4
// CHECK: br label %[[END_LABEL]]

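// _BitScanReverse reports the index of the highest set bit. The expected
// lowering uses llvm.ctlz and converts the leading-zero count into a bit
// index with 'xor 31'.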
unsigned char test_BitScanReverse(unsigned LONG *Index, unsigned LONG Mask) {
  return _BitScanReverse(Index, Mask);
}
// CHECK: define{{.*}}i8 @test_BitScanReverse(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK: [[ISZERO:%[0-9]+]] = icmp eq i32 %Mask, 0
// CHECK: br i1 [[ISZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK: [[END_LABEL]]:
// CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK: ret i8 [[RESULT]]
// CHECK: [[ISNOTZERO_LABEL]]:
// CHECK: [[REVINDEX:%[0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %Mask, i1 true)
// CHECK: [[INDEX:%[0-9]+]] = xor i32 [[REVINDEX]], 31
// CHECK: store i32 [[INDEX]], i32* %Index, align 4
// CHECK: br label %[[END_LABEL]]

#if defined(__x86_64__)
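// The 64-bit variants are only available on x86-64. They scan a 64-bit Mask
// but still store a 32-bit index, hence the trunc before the store.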
unsigned char test_BitScanForward64(unsigned LONG *Index, unsigned __int64 Mask) {
  return _BitScanForward64(Index, Mask);
}
// CHECK: define{{.*}}i8 @test_BitScanForward64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK: [[ISZERO:%[a-z0-9._]+]] = icmp eq i64 %Mask, 0
// CHECK: br i1 [[ISZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK: [[END_LABEL]]:
// CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK: ret i8 [[RESULT]]
// CHECK: [[ISNOTZERO_LABEL]]:
// CHECK: [[INDEX:%[0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %Mask, i1 true)
// CHECK: [[TRUNC_INDEX:%[0-9]+]] = trunc i64 [[INDEX]] to i32
// CHECK: store i32 [[TRUNC_INDEX]], i32* %Index, align 4
// CHECK: br label %[[END_LABEL]]

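// As above, but scanning from the most significant bit: llvm.ctlz, then a
// trunc, then 'xor 63' to turn the leading-zero count into a bit index.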
unsigned char test_BitScanReverse64(unsigned LONG *Index, unsigned __int64 Mask) {
  return _BitScanReverse64(Index, Mask);
}
// CHECK: define{{.*}}i8 @test_BitScanReverse64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK: [[ISZERO:%[0-9]+]] = icmp eq i64 %Mask, 0
// CHECK: br i1 [[ISZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK: [[END_LABEL]]:
// CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK: ret i8 [[RESULT]]
// CHECK: [[ISNOTZERO_LABEL]]:
// CHECK: [[REVINDEX:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %Mask, i1 true)
// CHECK: [[TRUNC_REVINDEX:%[0-9]+]] = trunc i64 [[REVINDEX]] to i32
// CHECK: [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63
// CHECK: store i32 [[INDEX]], i32* %Index, align 4
// CHECK: br label %[[END_LABEL]]
#endif

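// The _Interlocked* read-modify-write intrinsics are expected to lower to a
// single seq_cst atomicrmw that returns the previous value.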
LONG test_InterlockedExchange(LONG volatile *value, LONG mask) {
  return _InterlockedExchange(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchange(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask seq_cst
// CHECK: ret i32 [[RESULT]]
// CHECK: }

LONG test_InterlockedExchangeAdd(LONG volatile *value, LONG mask) {
  return _InterlockedExchangeAdd(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask seq_cst
// CHECK: ret i32 [[RESULT]]
// CHECK: }

LONG test_InterlockedExchangeSub(LONG volatile *value, LONG mask) {
  return _InterlockedExchangeSub(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i32* %value, i32 %mask seq_cst
// CHECK: ret i32 [[RESULT]]
// CHECK: }

LONG test_InterlockedOr(LONG volatile *value, LONG mask) {
  return _InterlockedOr(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedOr(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask seq_cst
// CHECK: ret i32 [[RESULT]]
// CHECK: }

LONG test_InterlockedXor(LONG volatile *value, LONG mask) {
  return _InterlockedXor(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedXor(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask seq_cst
// CHECK: ret i32 [[RESULT]]
// CHECK: }

LONG test_InterlockedAnd(LONG volatile *value, LONG mask) {
  return _InterlockedAnd(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedAnd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask seq_cst
// CHECK: ret i32 [[RESULT]]
// CHECK: }

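// _InterlockedCompareExchange maps to a seq_cst cmpxchg; the intrinsic
// returns the original value, i.e. element 0 of the { i32, i1 } result pair.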
LONG test_InterlockedCompareExchange(LONG volatile *Destination, LONG Exchange, LONG Comperand) {
  return _InterlockedCompareExchange(Destination, Exchange, Comperand);
}
// CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst
// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK: ret i32 [[RESULT]]
// CHECK: }

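// _InterlockedIncrement/_InterlockedDecrement return the new value, so the
// atomicrmw (which yields the old value) is followed by an add of +1 or -1.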
LONG test_InterlockedIncrement(LONG volatile *Addend) {
  return _InterlockedIncrement(Addend);
}
// CHECK: define{{.*}}i32 @test_InterlockedIncrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 seq_cst
// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK: ret i32 [[RESULT]]
// CHECK: }

LONG test_InterlockedDecrement(LONG volatile *Addend) {
  return _InterlockedDecrement(Addend);
}
// CHECK: define{{.*}}i32 @test_InterlockedDecrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 seq_cst
// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK: ret i32 [[RESULT]]
// CHECK: }

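// __lzcnt16/__lzcnt/__lzcnt64 count leading zeros and are defined for a zero
// input (they return the operand width), so they lower to llvm.ctlz with the
// is-zero-undef flag set to false.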
unsigned short test__lzcnt16(unsigned short x) {
  return __lzcnt16(x);
}
// CHECK: i16 @test__lzcnt16
// CHECK: [[RESULT:%[0-9]+]] = tail call i16 @llvm.ctlz.i16(i16 %x, i1 false)
// CHECK: ret i16 [[RESULT]]
// CHECK: }

unsigned int test__lzcnt(unsigned int x) {
  return __lzcnt(x);
}
// CHECK: i32 @test__lzcnt
// CHECK: [[RESULT:%[0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
// CHECK: ret i32 [[RESULT]]
// CHECK: }

unsigned __int64 test__lzcnt64(unsigned __int64 x) {
  return __lzcnt64(x);
}
// CHECK: i64 @test__lzcnt64
// CHECK: [[RESULT:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %x, i1 false)
// CHECK: ret i64 [[RESULT]]
// CHECK: }

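// __popcnt16/__popcnt/__popcnt64 count set bits and lower to llvm.ctpop.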
unsigned short test__popcnt16(unsigned short x) {
  return __popcnt16(x);
}
// CHECK: i16 @test__popcnt16
// CHECK: [[RESULT:%[0-9]+]] = tail call i16 @llvm.ctpop.i16(i16 %x)
// CHECK: ret i16 [[RESULT]]
// CHECK: }

unsigned int test__popcnt(unsigned int x) {
  return __popcnt(x);
}
// CHECK: i32 @test__popcnt
// CHECK: [[RESULT:%[0-9]+]] = tail call i32 @llvm.ctpop.i32(i32 %x)
// CHECK: ret i32 [[RESULT]]
// CHECK: }

unsigned __int64 test__popcnt64(unsigned __int64 x) {
  return __popcnt64(x);
}
// CHECK: i64 @test__popcnt64
// CHECK: [[RESULT:%[0-9]+]] = tail call i64 @llvm.ctpop.i64(i64 %x)
// CHECK: ret i64 [[RESULT]]
// CHECK: }