// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN: -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

#include <arm_neon.h>
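
// These tests check that the scalar vdup[bhsd]_lane/_laneq intrinsics lower to
// a single extractelement from the source vector (plus the usual round-trip
// bitcast through <8 x i8>/<16 x i8> for the multi-element types).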

// CHECK-LABEL: define float @test_vdups_lane_f32(<2 x float> %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
// CHECK: [[VDUPS_LANE:%.*]] = extractelement <2 x float> [[TMP1]], i32 1
// CHECK: ret float [[VDUPS_LANE]]
float32_t test_vdups_lane_f32(float32x2_t a) {
  return vdups_lane_f32(a, 1);
}


// CHECK-LABEL: define double @test_vdupd_lane_f64(<1 x double> %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
// CHECK: [[VDUPD_LANE:%.*]] = extractelement <1 x double> [[TMP1]], i32 0
// CHECK: ret double [[VDUPD_LANE]]
float64_t test_vdupd_lane_f64(float64x1_t a) {
  return vdupd_lane_f64(a, 0);
}


// CHECK-LABEL: define float @test_vdups_laneq_f32(<4 x float> %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> [[TMP1]], i32 3
// CHECK: ret float [[VGETQ_LANE]]
float32_t test_vdups_laneq_f32(float32x4_t a) {
  return vdups_laneq_f32(a, 3);
}


// CHECK-LABEL: define double @test_vdupd_laneq_f64(<2 x double> %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
// CHECK: ret double [[VGETQ_LANE]]
float64_t test_vdupd_laneq_f64(float64x2_t a) {
  return vdupd_laneq_f64(a, 1);
}

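// Lane reads from 64-bit integer vectors; these functions take 64-bit vector
// arguments and so land in attribute set #0 ("min-legal-vector-width"="64").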
// CHECK-LABEL: define i8 @test_vdupb_lane_s8(<8 x i8> %a) #0 {
// CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7
// CHECK: ret i8 [[VGET_LANE]]
int8_t test_vdupb_lane_s8(int8x8_t a) {
  return vdupb_lane_s8(a, 7);
}


// CHECK-LABEL: define i16 @test_vduph_lane_s16(<4 x i16> %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3
// CHECK: ret i16 [[VGET_LANE]]
int16_t test_vduph_lane_s16(int16x4_t a) {
  return vduph_lane_s16(a, 3);
}


// CHECK-LABEL: define i32 @test_vdups_lane_s32(<2 x i32> %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
// CHECK: ret i32 [[VGET_LANE]]
int32_t test_vdups_lane_s32(int32x2_t a) {
  return vdups_lane_s32(a, 1);
}


// CHECK-LABEL: define i64 @test_vdupd_lane_s64(<1 x i64> %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
// CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> [[TMP1]], i32 0
// CHECK: ret i64 [[VGET_LANE]]
int64_t test_vdupd_lane_s64(int64x1_t a) {
  return vdupd_lane_s64(a, 0);
}


// CHECK-LABEL: define i8 @test_vdupb_lane_u8(<8 x i8> %a) #0 {
// CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7
// CHECK: ret i8 [[VGET_LANE]]
uint8_t test_vdupb_lane_u8(uint8x8_t a) {
  return vdupb_lane_u8(a, 7);
}


// CHECK-LABEL: define i16 @test_vduph_lane_u16(<4 x i16> %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3
// CHECK: ret i16 [[VGET_LANE]]
uint16_t test_vduph_lane_u16(uint16x4_t a) {
  return vduph_lane_u16(a, 3);
}


// CHECK-LABEL: define i32 @test_vdups_lane_u32(<2 x i32> %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
// CHECK: ret i32 [[VGET_LANE]]
uint32_t test_vdups_lane_u32(uint32x2_t a) {
  return vdups_lane_u32(a, 1);
}


// CHECK-LABEL: define i64 @test_vdupd_lane_u64(<1 x i64> %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
// CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> [[TMP1]], i32 0
// CHECK: ret i64 [[VGET_LANE]]
uint64_t test_vdupd_lane_u64(uint64x1_t a) {
  return vdupd_lane_u64(a, 0);
}

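// Lane reads from 128-bit integer vectors; these functions take 128-bit vector
// arguments and so land in attribute set #1 ("min-legal-vector-width"="128").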
// CHECK-LABEL: define i8 @test_vdupb_laneq_s8(<16 x i8> %a) #1 {
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15
// CHECK: ret i8 [[VGETQ_LANE]]
int8_t test_vdupb_laneq_s8(int8x16_t a) {
  return vdupb_laneq_s8(a, 15);
}


// CHECK-LABEL: define i16 @test_vduph_laneq_s16(<8 x i16> %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
// CHECK: ret i16 [[VGETQ_LANE]]
int16_t test_vduph_laneq_s16(int16x8_t a) {
  return vduph_laneq_s16(a, 7);
}


// CHECK-LABEL: define i32 @test_vdups_laneq_s32(<4 x i32> %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
// CHECK: ret i32 [[VGETQ_LANE]]
int32_t test_vdups_laneq_s32(int32x4_t a) {
  return vdups_laneq_s32(a, 3);
}


// CHECK-LABEL: define i64 @test_vdupd_laneq_s64(<2 x i64> %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
// CHECK: ret i64 [[VGETQ_LANE]]
int64_t test_vdupd_laneq_s64(int64x2_t a) {
  return vdupd_laneq_s64(a, 1);
}


// CHECK-LABEL: define i8 @test_vdupb_laneq_u8(<16 x i8> %a) #1 {
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15
// CHECK: ret i8 [[VGETQ_LANE]]
uint8_t test_vdupb_laneq_u8(uint8x16_t a) {
  return vdupb_laneq_u8(a, 15);
}


// CHECK-LABEL: define i16 @test_vduph_laneq_u16(<8 x i16> %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
// CHECK: ret i16 [[VGETQ_LANE]]
uint16_t test_vduph_laneq_u16(uint16x8_t a) {
  return vduph_laneq_u16(a, 7);
}


// CHECK-LABEL: define i32 @test_vdups_laneq_u32(<4 x i32> %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
// CHECK: ret i32 [[VGETQ_LANE]]
uint32_t test_vdups_laneq_u32(uint32x4_t a) {
  return vdups_laneq_u32(a, 3);
}


// CHECK-LABEL: define i64 @test_vdupd_laneq_u64(<2 x i64> %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
// CHECK: ret i64 [[VGETQ_LANE]]
uint64_t test_vdupd_laneq_u64(uint64x2_t a) {
  return vdupd_laneq_u64(a, 1);
}

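// The poly8/poly16 variants lower to the same extractelement pattern as the
// integer variants above.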
// CHECK-LABEL: define i8 @test_vdupb_lane_p8(<8 x i8> %a) #0 {
// CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7
// CHECK: ret i8 [[VGET_LANE]]
poly8_t test_vdupb_lane_p8(poly8x8_t a) {
  return vdupb_lane_p8(a, 7);
}

// CHECK-LABEL: define i16 @test_vduph_lane_p16(<4 x i16> %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3
// CHECK: ret i16 [[VGET_LANE]]
poly16_t test_vduph_lane_p16(poly16x4_t a) {
  return vduph_lane_p16(a, 3);
}

// CHECK-LABEL: define i8 @test_vdupb_laneq_p8(<16 x i8> %a) #1 {
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15
// CHECK: ret i8 [[VGETQ_LANE]]
poly8_t test_vdupb_laneq_p8(poly8x16_t a) {
  return vdupb_laneq_p8(a, 15);
}

// CHECK-LABEL: define i16 @test_vduph_laneq_p16(<8 x i16> %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
// CHECK: ret i16 [[VGETQ_LANE]]
poly16_t test_vduph_laneq_p16(poly16x8_t a) {
  return vduph_laneq_p16(a, 7);
}

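// Functions taking 64-bit vectors belong to attribute set #0, functions taking
// 128-bit vectors to attribute set #1.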
// CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="64"
// CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="128"