// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=i686-unknown-unknown -target-feature +xsave -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=XSAVE
// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=i686-unknown-unknown -target-feature +xsave -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=XSAVE

// RUN: %clang_cc1 %s -DTEST_XGETBV -O0 -triple=i686-unknown-unknown -target-feature +xsave -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=XGETBV
// RUN: %clang_cc1 %s -DTEST_XSETBV -O0 -triple=i686-unknown-unknown -target-feature +xsave -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=XSETBV

// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=i686-unknown-unknown -target-feature +xsave -target-feature +xsaveopt -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=XSAVEOPT
// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=i686-unknown-unknown -target-feature +xsave -target-feature +xsaveopt -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=XSAVEOPT

// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=i686-unknown-unknown -target-feature +xsave -target-feature +xsavec -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=XSAVEC
// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=i686-unknown-unknown -target-feature +xsave -target-feature +xsavec -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=XSAVEC

// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=i686-unknown-unknown -target-feature +xsave -target-feature +xsaves -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=XSAVES
// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=i686-unknown-unknown -target-feature +xsave -target-feature +xsaves -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=XSAVES

// Don't include mm_malloc.h, it's system specific.
#define __MM_MALLOC_H
#include <x86intrin.h>
19 | |
20 | void test() { |
21 | unsigned long long tmp_ULLi; |
22 | unsigned int tmp_Ui; |
23 | void* tmp_vp; |
24 | tmp_ULLi = 0; tmp_Ui = 0; tmp_vp = 0; |
25 | |
26 | #ifdef TEST_XSAVE |
27 | // XSAVE: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4 |
28 | // XSAVE: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8 |
29 | // XSAVE: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32 |
30 | // XSAVE: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32 |
31 | // XSAVE: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32 |
32 | // XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]]) |
33 | (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi); |
34 | |
35 | // XSAVE: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4 |
36 | // XSAVE: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8 |
37 | // XSAVE: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32 |
38 | // XSAVE: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32 |
39 | // XSAVE: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32 |
40 | // XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]]) |
41 | (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi); |
42 | |
43 | // XSAVE: call void @llvm.x86.xsave |
44 | (void)_xsave(tmp_vp, tmp_ULLi); |
45 | |
46 | // XSAVE: call void @llvm.x86.xrstor |
47 | (void)_xrstor(tmp_vp, tmp_ULLi); |
48 | #endif |
49 | |
50 | #ifdef TEST_XSAVEOPT |
51 | // XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4 |
52 | // XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8 |
53 | // XSAVEOPT: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32 |
54 | // XSAVEOPT: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32 |
55 | // XSAVEOPT: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32 |
56 | // XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]]) |
57 | (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi); |
58 | |
59 | // XSAVEOPT: call void @llvm.x86.xsaveopt |
60 | (void)_xsaveopt(tmp_vp, tmp_ULLi); |
61 | #endif |
62 | |
63 | #ifdef TEST_XSAVEC |
64 | // XSAVEC: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4 |
65 | // XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8 |
66 | // XSAVEC: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32 |
67 | // XSAVEC: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32 |
68 | // XSAVEC: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32 |
69 | // XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]]) |
70 | (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi); |
71 | |
72 | // XSAVEC: call void @llvm.x86.xsavec |
73 | (void)_xsavec(tmp_vp, tmp_ULLi); |
74 | #endif |
75 | |
76 | #ifdef TEST_XSAVES |
77 | // XSAVES: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4 |
78 | // XSAVES: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8 |
79 | // XSAVES: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32 |
80 | // XSAVES: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32 |
81 | // XSAVES: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32 |
82 | // XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]]) |
83 | (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi); |
84 | |
85 | // XSAVES: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4 |
86 | // XSAVES: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8 |
87 | // XSAVES: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32 |
88 | // XSAVES: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32 |
89 | // XSAVES: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32 |
90 | // XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]]) |
91 | (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi); |
92 | |
93 | // XSAVES: call void @llvm.x86.xsaves |
94 | (void)_xsaves(tmp_vp, tmp_ULLi); |
95 | |
96 | // XSAVES: call void @llvm.x86.xrstors |
97 | (void)_xrstors(tmp_vp, tmp_ULLi); |
98 | #endif |
99 | |
100 | #ifdef TEST_XGETBV |
101 | // XGETBV: [[tmp_Ui:%[0-9a-zA-z]+]] = load i32, i32* %tmp_Ui, align 4 |
102 | // XGETBV: call i64 @llvm.x86.xgetbv(i32 [[tmp_Ui]]) |
103 | tmp_ULLi = __builtin_ia32_xgetbv(tmp_Ui); |
104 | |
105 | // XGETBV: call i64 @llvm.x86.xgetbv |
106 | tmp_ULLi = _xgetbv(tmp_Ui); |
107 | #endif |
108 | |
109 | #ifdef TEST_XSETBV |
110 | // XSETBV: [[tmp_Ui:%[0-9a-zA-z]+]] = load i32, i32* %tmp_Ui, align 4 |
111 | // XSETBV: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8 |
112 | // XSETBV: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32 |
113 | // XSETBV: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32 |
114 | // XSETBV: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32 |
115 | // XSETBV: call void @llvm.x86.xsetbv(i32 [[tmp_Ui]], i32 [[high32_3]], i32 [[low32_3]]) |
116 | (void)__builtin_ia32_xsetbv(tmp_Ui, tmp_ULLi); |
117 | |
118 | // XSETBV: call void @llvm.x86.xsetbv |
119 | (void)_xsetbv(tmp_Ui, tmp_ULLi); |
120 | #endif |
121 | |
122 | } |
123 | |