1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
3 // RUN: %clang_cc1 -DPOLYMORPHIC -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
4
5 // REQUIRES: aarch64-registered-target || arm-registered-target
6
7 #include <arm_mve.h>
8
9 // CHECK-LABEL: @test_vcvtq_f16_s16(
10 // CHECK-NEXT: entry:
11 // CHECK-NEXT: [[TMP0:%.*]] = sitofp <8 x i16> [[A:%.*]] to <8 x half>
12 // CHECK-NEXT: ret <8 x half> [[TMP0]]
13 //
test_vcvtq_f16_s16(int16x8_t a)14 float16x8_t test_vcvtq_f16_s16(int16x8_t a)
15 {
16 #ifdef POLYMORPHIC
17 return vcvtq(a);
18 #else /* POLYMORPHIC */
19 return vcvtq_f16_s16(a);
20 #endif /* POLYMORPHIC */
21 }
22
23 // CHECK-LABEL: @test_vcvtq_f16_u16(
24 // CHECK-NEXT: entry:
25 // CHECK-NEXT: [[TMP0:%.*]] = uitofp <8 x i16> [[A:%.*]] to <8 x half>
26 // CHECK-NEXT: ret <8 x half> [[TMP0]]
27 //
test_vcvtq_f16_u16(uint16x8_t a)28 float16x8_t test_vcvtq_f16_u16(uint16x8_t a)
29 {
30 #ifdef POLYMORPHIC
31 return vcvtq(a);
32 #else /* POLYMORPHIC */
33 return vcvtq_f16_u16(a);
34 #endif /* POLYMORPHIC */
35 }
36
37 // CHECK-LABEL: @test_vcvtq_f32_s32(
38 // CHECK-NEXT: entry:
39 // CHECK-NEXT: [[TMP0:%.*]] = sitofp <4 x i32> [[A:%.*]] to <4 x float>
40 // CHECK-NEXT: ret <4 x float> [[TMP0]]
41 //
test_vcvtq_f32_s32(int32x4_t a)42 float32x4_t test_vcvtq_f32_s32(int32x4_t a)
43 {
44 #ifdef POLYMORPHIC
45 return vcvtq(a);
46 #else /* POLYMORPHIC */
47 return vcvtq_f32_s32(a);
48 #endif /* POLYMORPHIC */
49 }
50
51 // CHECK-LABEL: @test_vcvtq_f32_u32(
52 // CHECK-NEXT: entry:
53 // CHECK-NEXT: [[TMP0:%.*]] = uitofp <4 x i32> [[A:%.*]] to <4 x float>
54 // CHECK-NEXT: ret <4 x float> [[TMP0]]
55 //
test_vcvtq_f32_u32(uint32x4_t a)56 float32x4_t test_vcvtq_f32_u32(uint32x4_t a)
57 {
58 #ifdef POLYMORPHIC
59 return vcvtq(a);
60 #else /* POLYMORPHIC */
61 return vcvtq_f32_u32(a);
62 #endif /* POLYMORPHIC */
63 }
64
65 // CHECK-LABEL: @test_vcvtq_s16_f16(
66 // CHECK-NEXT: entry:
67 // CHECK-NEXT: [[TMP0:%.*]] = fptosi <8 x half> [[A:%.*]] to <8 x i16>
68 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
69 //
test_vcvtq_s16_f16(float16x8_t a)70 int16x8_t test_vcvtq_s16_f16(float16x8_t a)
71 {
72 return vcvtq_s16_f16(a);
73 }
74
75 // CHECK-LABEL: @test_vcvtq_s32_f32(
76 // CHECK-NEXT: entry:
77 // CHECK-NEXT: [[TMP0:%.*]] = fptosi <4 x float> [[A:%.*]] to <4 x i32>
78 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
79 //
test_vcvtq_s32_f32(float32x4_t a)80 int32x4_t test_vcvtq_s32_f32(float32x4_t a)
81 {
82 return vcvtq_s32_f32(a);
83 }
84
85 // CHECK-LABEL: @test_vcvtq_u16_f16(
86 // CHECK-NEXT: entry:
87 // CHECK-NEXT: [[TMP0:%.*]] = fptoui <8 x half> [[A:%.*]] to <8 x i16>
88 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
89 //
test_vcvtq_u16_f16(float16x8_t a)90 uint16x8_t test_vcvtq_u16_f16(float16x8_t a)
91 {
92 return vcvtq_u16_f16(a);
93 }
94
95 // CHECK-LABEL: @test_vcvtq_u32_f32(
96 // CHECK-NEXT: entry:
97 // CHECK-NEXT: [[TMP0:%.*]] = fptoui <4 x float> [[A:%.*]] to <4 x i32>
98 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
99 //
test_vcvtq_u32_f32(float32x4_t a)100 uint32x4_t test_vcvtq_u32_f32(float32x4_t a)
101 {
102 return vcvtq_u32_f32(a);
103 }
104
105 // CHECK-LABEL: @test_vcvtq_m_f16_s16(
106 // CHECK-NEXT: entry:
107 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
108 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
109 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 0, <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
110 // CHECK-NEXT: ret <8 x half> [[TMP2]]
111 //
float16x8_t test_vcvtq_m_f16_s16(float16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtq_m_f16_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}
120
121 // CHECK-LABEL: @test_vcvtq_m_f16_u16(
122 // CHECK-NEXT: entry:
123 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
124 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
125 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
126 // CHECK-NEXT: ret <8 x half> [[TMP2]]
127 //
float16x8_t test_vcvtq_m_f16_u16(float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtq_m_f16_u16(inactive, a, p);
#endif /* POLYMORPHIC */
}
136
137 // CHECK-LABEL: @test_vcvtq_m_f32_s32(
138 // CHECK-NEXT: entry:
139 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
140 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
141 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fp.int.predicated.v4f32.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 0, <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
142 // CHECK-NEXT: ret <4 x float> [[TMP2]]
143 //
float32x4_t test_vcvtq_m_f32_s32(float32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtq_m_f32_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}
152
153 // CHECK-LABEL: @test_vcvtq_m_f32_u32(
154 // CHECK-NEXT: entry:
155 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
156 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
157 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fp.int.predicated.v4f32.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
158 // CHECK-NEXT: ret <4 x float> [[TMP2]]
159 //
float32x4_t test_vcvtq_m_f32_u32(float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtq_m_f32_u32(inactive, a, p);
#endif /* POLYMORPHIC */
}
168
169 // CHECK-LABEL: @test_vcvtq_m_s16_f16(
170 // CHECK-NEXT: entry:
171 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
172 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
173 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fp.int.predicated.v8i16.v8f16.v8i1(<8 x half> [[A:%.*]], i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
174 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
175 //
int16x8_t test_vcvtq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtq_m_s16_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}
184
185 // CHECK-LABEL: @test_vcvtq_m_s32_f32(
186 // CHECK-NEXT: entry:
187 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
188 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
189 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fp.int.predicated.v4i32.v4f32.v4i1(<4 x float> [[A:%.*]], i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
190 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
191 //
int32x4_t test_vcvtq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtq_m_s32_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}
200
201 // CHECK-LABEL: @test_vcvtq_m_u16_f16(
202 // CHECK-NEXT: entry:
203 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
204 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
205 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fp.int.predicated.v8i16.v8f16.v8i1(<8 x half> [[A:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
206 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
207 //
uint16x8_t test_vcvtq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtq_m_u16_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}
216
217 // CHECK-LABEL: @test_vcvtq_m_u32_f32(
218 // CHECK-NEXT: entry:
219 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
220 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
221 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fp.int.predicated.v4i32.v4f32.v4i1(<4 x float> [[A:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
222 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
223 //
uint32x4_t test_vcvtq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtq_m_u32_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}
232
233 // CHECK-LABEL: @test_vcvtq_x_f16_s16(
234 // CHECK-NEXT: entry:
235 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
236 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
237 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 0, <8 x i1> [[TMP1]], <8 x half> undef)
238 // CHECK-NEXT: ret <8 x half> [[TMP2]]
239 //
test_vcvtq_x_f16_s16(int16x8_t a,mve_pred16_t p)240 float16x8_t test_vcvtq_x_f16_s16(int16x8_t a, mve_pred16_t p)
241 {
242 #ifdef POLYMORPHIC
243 return vcvtq_x(a, p);
244 #else /* POLYMORPHIC */
245 return vcvtq_x_f16_s16(a, p);
246 #endif /* POLYMORPHIC */
247 }
248
249 // CHECK-LABEL: @test_vcvtq_x_f16_u16(
250 // CHECK-NEXT: entry:
251 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
252 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
253 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x half> undef)
254 // CHECK-NEXT: ret <8 x half> [[TMP2]]
255 //
test_vcvtq_x_f16_u16(uint16x8_t a,mve_pred16_t p)256 float16x8_t test_vcvtq_x_f16_u16(uint16x8_t a, mve_pred16_t p)
257 {
258 #ifdef POLYMORPHIC
259 return vcvtq_x(a, p);
260 #else /* POLYMORPHIC */
261 return vcvtq_x_f16_u16(a, p);
262 #endif /* POLYMORPHIC */
263 }
264
265 // CHECK-LABEL: @test_vcvtq_x_f32_s32(
266 // CHECK-NEXT: entry:
267 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
268 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
269 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fp.int.predicated.v4f32.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 0, <4 x i1> [[TMP1]], <4 x float> undef)
270 // CHECK-NEXT: ret <4 x float> [[TMP2]]
271 //
test_vcvtq_x_f32_s32(int32x4_t a,mve_pred16_t p)272 float32x4_t test_vcvtq_x_f32_s32(int32x4_t a, mve_pred16_t p)
273 {
274 #ifdef POLYMORPHIC
275 return vcvtq_x(a, p);
276 #else /* POLYMORPHIC */
277 return vcvtq_x_f32_s32(a, p);
278 #endif /* POLYMORPHIC */
279 }
280
281 // CHECK-LABEL: @test_vcvtq_x_f32_u32(
282 // CHECK-NEXT: entry:
283 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
284 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
285 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fp.int.predicated.v4f32.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x float> undef)
286 // CHECK-NEXT: ret <4 x float> [[TMP2]]
287 //
test_vcvtq_x_f32_u32(uint32x4_t a,mve_pred16_t p)288 float32x4_t test_vcvtq_x_f32_u32(uint32x4_t a, mve_pred16_t p)
289 {
290 #ifdef POLYMORPHIC
291 return vcvtq_x(a, p);
292 #else /* POLYMORPHIC */
293 return vcvtq_x_f32_u32(a, p);
294 #endif /* POLYMORPHIC */
295 }
296
297 // CHECK-LABEL: @test_vcvtq_x_s16_f16(
298 // CHECK-NEXT: entry:
299 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
300 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
301 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fp.int.predicated.v8i16.v8f16.v8i1(<8 x half> [[A:%.*]], i32 0, <8 x i1> [[TMP1]], <8 x i16> undef)
302 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
303 //
test_vcvtq_x_s16_f16(float16x8_t a,mve_pred16_t p)304 int16x8_t test_vcvtq_x_s16_f16(float16x8_t a, mve_pred16_t p)
305 {
306 return vcvtq_x_s16_f16(a, p);
307 }
308
309 // CHECK-LABEL: @test_vcvtq_x_s32_f32(
310 // CHECK-NEXT: entry:
311 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
312 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
313 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fp.int.predicated.v4i32.v4f32.v4i1(<4 x float> [[A:%.*]], i32 0, <4 x i1> [[TMP1]], <4 x i32> undef)
314 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
315 //
test_vcvtq_x_s32_f32(float32x4_t a,mve_pred16_t p)316 int32x4_t test_vcvtq_x_s32_f32(float32x4_t a, mve_pred16_t p)
317 {
318 return vcvtq_x_s32_f32(a, p);
319 }
320
321 // CHECK-LABEL: @test_vcvtq_x_u16_f16(
322 // CHECK-NEXT: entry:
323 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
324 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
325 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fp.int.predicated.v8i16.v8f16.v8i1(<8 x half> [[A:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x i16> undef)
326 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
327 //
test_vcvtq_x_u16_f16(float16x8_t a,mve_pred16_t p)328 uint16x8_t test_vcvtq_x_u16_f16(float16x8_t a, mve_pred16_t p)
329 {
330 return vcvtq_x_u16_f16(a, p);
331 }
332
333 // CHECK-LABEL: @test_vcvtq_x_u32_f32(
334 // CHECK-NEXT: entry:
335 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
336 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
337 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fp.int.predicated.v4i32.v4f32.v4i1(<4 x float> [[A:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i32> undef)
338 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
339 //
test_vcvtq_x_u32_f32(float32x4_t a,mve_pred16_t p)340 uint32x4_t test_vcvtq_x_u32_f32(float32x4_t a, mve_pred16_t p)
341 {
342 return vcvtq_x_u32_f32(a, p);
343 }
344
345 // CHECK-LABEL: @test_vcvttq_f16_f32(
346 // CHECK-NEXT: entry:
347 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.narrow(<8 x half> [[A:%.*]], <4 x float> [[B:%.*]], i32 1)
348 // CHECK-NEXT: ret <8 x half> [[TMP0]]
349 //
float16x8_t test_vcvttq_f16_f32(float16x8_t a, float32x4_t b)
{
    return vcvttq_f16_f32(a, b);
}
354
355 // CHECK-LABEL: @test_vcvttq_m_f16_f32(
356 // CHECK-NEXT: entry:
357 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
358 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
359 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.narrow.predicated(<8 x half> [[A:%.*]], <4 x float> [[B:%.*]], i32 1, <4 x i1> [[TMP1]])
360 // CHECK-NEXT: ret <8 x half> [[TMP2]]
361 //
float16x8_t test_vcvttq_m_f16_f32(float16x8_t a, float32x4_t b, mve_pred16_t p)
{
    return vcvttq_m_f16_f32(a, b, p);
}
366
367 // CHECK-LABEL: @test_vcvtq_n_f16_s16(
368 // CHECK-NEXT: entry:
369 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32 0, <8 x i16> [[A:%.*]], i32 1)
370 // CHECK-NEXT: ret <8 x half> [[TMP0]]
371 //
test_vcvtq_n_f16_s16(int16x8_t a)372 float16x8_t test_vcvtq_n_f16_s16(int16x8_t a)
373 {
374 #ifdef POLYMORPHIC
375 return vcvtq_n(a, 1);
376 #else
377 return vcvtq_n_f16_s16(a, 1);
378 #endif
379 }
380
381 // CHECK-LABEL: @test_vcvtq_n_f16_u16(
382 // CHECK-NEXT: entry:
383 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32 1, <8 x i16> [[A:%.*]], i32 2)
384 // CHECK-NEXT: ret <8 x half> [[TMP0]]
385 //
test_vcvtq_n_f16_u16(uint16x8_t a)386 float16x8_t test_vcvtq_n_f16_u16(uint16x8_t a)
387 {
388 #ifdef POLYMORPHIC
389 return vcvtq_n(a, 2);
390 #else
391 return vcvtq_n_f16_u16(a, 2);
392 #endif
393 }
394
395 // CHECK-LABEL: @test_vcvtq_n_f32_s32(
396 // CHECK-NEXT: entry:
397 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.v4f32.v4i32(i32 0, <4 x i32> [[A:%.*]], i32 3)
398 // CHECK-NEXT: ret <4 x float> [[TMP0]]
399 //
test_vcvtq_n_f32_s32(int32x4_t a)400 float32x4_t test_vcvtq_n_f32_s32(int32x4_t a)
401 {
402 #ifdef POLYMORPHIC
403 return vcvtq_n(a, 3);
404 #else
405 return vcvtq_n_f32_s32(a, 3);
406 #endif
407 }
408
409 // CHECK-LABEL: @test_vcvtq_n_f32_u32(
410 // CHECK-NEXT: entry:
411 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.v4f32.v4i32(i32 1, <4 x i32> [[A:%.*]], i32 32)
412 // CHECK-NEXT: ret <4 x float> [[TMP0]]
413 //
test_vcvtq_n_f32_u32(uint32x4_t a)414 float32x4_t test_vcvtq_n_f32_u32(uint32x4_t a)
415 {
416 #ifdef POLYMORPHIC
417 return vcvtq_n(a, 32);
418 #else
419 return vcvtq_n_f32_u32(a, 32);
420 #endif
421 }
422
423 // CHECK-LABEL: @test_vcvtq_n_s16_f16(
424 // CHECK-NEXT: entry:
425 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]], i32 1)
426 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
427 //
test_vcvtq_n_s16_f16(float16x8_t a)428 int16x8_t test_vcvtq_n_s16_f16(float16x8_t a)
429 {
430 return vcvtq_n_s16_f16(a, 1);
431 }
432
433 // CHECK-LABEL: @test_vcvtq_n_u16_f16(
434 // CHECK-NEXT: entry:
435 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]], i32 2)
436 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
437 //
test_vcvtq_n_u16_f16(float16x8_t a)438 uint16x8_t test_vcvtq_n_u16_f16(float16x8_t a)
439 {
440 return vcvtq_n_u16_f16(a, 2);
441 }
442
443 // CHECK-LABEL: @test_vcvtq_n_s32_f32(
444 // CHECK-NEXT: entry:
445 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]], i32 3)
446 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
447 //
test_vcvtq_n_s32_f32(float32x4_t a)448 int32x4_t test_vcvtq_n_s32_f32(float32x4_t a)
449 {
450 return vcvtq_n_s32_f32(a, 3);
451 }
452
453 // CHECK-LABEL: @test_vcvtq_n_u32_f32(
454 // CHECK-NEXT: entry:
455 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]], i32 32)
456 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
457 //
test_vcvtq_n_u32_f32(float32x4_t a)458 uint32x4_t test_vcvtq_n_u32_f32(float32x4_t a)
459 {
460 return vcvtq_n_u32_f32(a, 32);
461 }
462
463 // CHECK-LABEL: @test_vcvtq_m_n_f16_s16(
464 // CHECK-NEXT: entry:
465 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
466 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
467 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 0, <8 x half> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], i32 1, <8 x i1> [[TMP1]])
468 // CHECK-NEXT: ret <8 x half> [[TMP2]]
469 //
float16x8_t test_vcvtq_m_n_f16_s16(float16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m_n(inactive, a, 1, p);
#else
    return vcvtq_m_n_f16_s16(inactive, a, 1, p);
#endif
}
478
479 // CHECK-LABEL: @test_vcvtq_m_n_f16_u16(
480 // CHECK-NEXT: entry:
481 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
482 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
483 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 1, <8 x half> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], i32 2, <8 x i1> [[TMP1]])
484 // CHECK-NEXT: ret <8 x half> [[TMP2]]
485 //
float16x8_t test_vcvtq_m_n_f16_u16(float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m_n(inactive, a, 2, p);
#else
    return vcvtq_m_n_f16_u16(inactive, a, 2, p);
#endif
}
494
495 // CHECK-LABEL: @test_vcvtq_m_n_f32_s32(
496 // CHECK-NEXT: entry:
497 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
498 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
499 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 0, <4 x float> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], i32 3, <4 x i1> [[TMP1]])
500 // CHECK-NEXT: ret <4 x float> [[TMP2]]
501 //
float32x4_t test_vcvtq_m_n_f32_s32(float32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m_n(inactive, a, 3, p);
#else
    return vcvtq_m_n_f32_s32(inactive, a, 3, p);
#endif
}
510
511 // CHECK-LABEL: @test_vcvtq_m_n_f32_u32(
512 // CHECK-NEXT: entry:
513 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
514 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
515 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 1, <4 x float> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], i32 32, <4 x i1> [[TMP1]])
516 // CHECK-NEXT: ret <4 x float> [[TMP2]]
517 //
float32x4_t test_vcvtq_m_n_f32_u32(float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m_n(inactive, a, 32, p);
#else
    return vcvtq_m_n_f32_u32(inactive, a, 32, p);
#endif
}
526
527 // CHECK-LABEL: @test_vcvtq_m_n_s16_f16(
528 // CHECK-NEXT: entry:
529 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
530 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
531 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], i32 1, <8 x i1> [[TMP1]])
532 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
533 //
int16x8_t test_vcvtq_m_n_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m_n(inactive, a, 1, p);
#else
    return vcvtq_m_n_s16_f16(inactive, a, 1, p);
#endif
}
542
543 // CHECK-LABEL: @test_vcvtq_m_n_u16_f16(
544 // CHECK-NEXT: entry:
545 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
546 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
547 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], i32 2, <8 x i1> [[TMP1]])
548 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
549 //
uint16x8_t test_vcvtq_m_n_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m_n(inactive, a, 2, p);
#else
    return vcvtq_m_n_u16_f16(inactive, a, 2, p);
#endif
}
558
559 // CHECK-LABEL: @test_vcvtq_m_n_s32_f32(
560 // CHECK-NEXT: entry:
561 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
562 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
563 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], i32 3, <4 x i1> [[TMP1]])
564 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
565 //
int32x4_t test_vcvtq_m_n_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m_n(inactive, a, 3, p);
#else
    return vcvtq_m_n_s32_f32(inactive, a, 3, p);
#endif
}
574
575 // CHECK-LABEL: @test_vcvtq_m_n_u32_f32(
576 // CHECK-NEXT: entry:
577 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
578 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
579 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], i32 32, <4 x i1> [[TMP1]])
580 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
581 //
uint32x4_t test_vcvtq_m_n_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtq_m_n(inactive, a, 32, p);
#else
    return vcvtq_m_n_u32_f32(inactive, a, 32, p);
#endif
}
590
591 // CHECK-LABEL: @test_vcvtq_x_n_f16_s16(
592 // CHECK-NEXT: entry:
593 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
594 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
595 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 0, <8 x half> undef, <8 x i16> [[A:%.*]], i32 1, <8 x i1> [[TMP1]])
596 // CHECK-NEXT: ret <8 x half> [[TMP2]]
597 //
test_vcvtq_x_n_f16_s16(int16x8_t a,mve_pred16_t p)598 float16x8_t test_vcvtq_x_n_f16_s16(int16x8_t a, mve_pred16_t p)
599 {
600 #ifdef POLYMORPHIC
601 return vcvtq_x_n(a, 1, p);
602 #else
603 return vcvtq_x_n_f16_s16(a, 1, p);
604 #endif
605 }
606
607 // CHECK-LABEL: @test_vcvtq_x_n_f16_u16(
608 // CHECK-NEXT: entry:
609 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
610 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
611 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 1, <8 x half> undef, <8 x i16> [[A:%.*]], i32 2, <8 x i1> [[TMP1]])
612 // CHECK-NEXT: ret <8 x half> [[TMP2]]
613 //
test_vcvtq_x_n_f16_u16(uint16x8_t a,mve_pred16_t p)614 float16x8_t test_vcvtq_x_n_f16_u16(uint16x8_t a, mve_pred16_t p)
615 {
616 #ifdef POLYMORPHIC
617 return vcvtq_x_n(a, 2, p);
618 #else
619 return vcvtq_x_n_f16_u16(a, 2, p);
620 #endif
621 }
622
623 // CHECK-LABEL: @test_vcvtq_x_n_f32_s32(
624 // CHECK-NEXT: entry:
625 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
626 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
627 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 0, <4 x float> undef, <4 x i32> [[A:%.*]], i32 3, <4 x i1> [[TMP1]])
628 // CHECK-NEXT: ret <4 x float> [[TMP2]]
629 //
test_vcvtq_x_n_f32_s32(int32x4_t a,mve_pred16_t p)630 float32x4_t test_vcvtq_x_n_f32_s32(int32x4_t a, mve_pred16_t p)
631 {
632 #ifdef POLYMORPHIC
633 return vcvtq_x_n(a, 3, p);
634 #else
635 return vcvtq_x_n_f32_s32(a, 3, p);
636 #endif
637 }
638
639 // CHECK-LABEL: @test_vcvtq_x_n_f32_u32(
640 // CHECK-NEXT: entry:
641 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
642 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
643 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 1, <4 x float> undef, <4 x i32> [[A:%.*]], i32 32, <4 x i1> [[TMP1]])
644 // CHECK-NEXT: ret <4 x float> [[TMP2]]
645 //
test_vcvtq_x_n_f32_u32(uint32x4_t a,mve_pred16_t p)646 float32x4_t test_vcvtq_x_n_f32_u32(uint32x4_t a, mve_pred16_t p)
647 {
648 #ifdef POLYMORPHIC
649 return vcvtq_x_n(a, 32, p);
650 #else
651 return vcvtq_x_n_f32_u32(a, 32, p);
652 #endif
653 }
654
655 // CHECK-LABEL: @test_vcvtq_x_n_s16_f16(
656 // CHECK-NEXT: entry:
657 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
658 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
659 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], i32 1, <8 x i1> [[TMP1]])
660 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
661 //
test_vcvtq_x_n_s16_f16(float16x8_t a,mve_pred16_t p)662 int16x8_t test_vcvtq_x_n_s16_f16(float16x8_t a, mve_pred16_t p)
663 {
664 return vcvtq_x_n_s16_f16(a, 1, p);
665 }
666
667 // CHECK-LABEL: @test_vcvtq_x_n_u16_f16(
668 // CHECK-NEXT: entry:
669 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
670 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
671 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], i32 2, <8 x i1> [[TMP1]])
672 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
673 //
test_vcvtq_x_n_u16_f16(float16x8_t a,mve_pred16_t p)674 uint16x8_t test_vcvtq_x_n_u16_f16(float16x8_t a, mve_pred16_t p)
675 {
676 return vcvtq_x_n_u16_f16(a, 2, p);
677 }
678
679 // CHECK-LABEL: @test_vcvtq_x_n_s32_f32(
680 // CHECK-NEXT: entry:
681 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
682 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
683 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], i32 3, <4 x i1> [[TMP1]])
684 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
685 //
test_vcvtq_x_n_s32_f32(float32x4_t a,mve_pred16_t p)686 int32x4_t test_vcvtq_x_n_s32_f32(float32x4_t a, mve_pred16_t p)
687 {
688 return vcvtq_x_n_s32_f32(a, 3, p);
689 }
690
691 // CHECK-LABEL: @test_vcvtq_x_n_u32_f32(
692 // CHECK-NEXT: entry:
693 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
694 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
695 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], i32 32, <4 x i1> [[TMP1]])
696 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
697 //
test_vcvtq_x_n_u32_f32(float32x4_t a,mve_pred16_t p)698 uint32x4_t test_vcvtq_x_n_u32_f32(float32x4_t a, mve_pred16_t p)
699 {
700 return vcvtq_x_n_u32_f32(a, 32, p);
701 }
702
703 // CHECK-LABEL: @test_vcvtbq_f32_f16(
704 // CHECK-NEXT: entry:
705 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.widen(<8 x half> [[A:%.*]], i32 0)
706 // CHECK-NEXT: ret <4 x float> [[TMP0]]
707 //
test_vcvtbq_f32_f16(float16x8_t a)708 float32x4_t test_vcvtbq_f32_f16(float16x8_t a)
709 {
710 return vcvtbq_f32_f16(a);
711 }
712
713 // CHECK-LABEL: @test_vcvttq_f32_f16(
714 // CHECK-NEXT: entry:
715 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.widen(<8 x half> [[A:%.*]], i32 1)
716 // CHECK-NEXT: ret <4 x float> [[TMP0]]
717 //
test_vcvttq_f32_f16(float16x8_t a)718 float32x4_t test_vcvttq_f32_f16(float16x8_t a)
719 {
720 return vcvttq_f32_f16(a);
721 }
722
723 // CHECK-LABEL: @test_vcvtbq_m_f32_f16(
724 // CHECK-NEXT: entry:
725 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
726 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
727 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.widen.predicated(<4 x float> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], i32 0, <4 x i1> [[TMP1]])
728 // CHECK-NEXT: ret <4 x float> [[TMP2]]
729 //
float32x4_t test_vcvtbq_m_f32_f16(float32x4_t inactive, float16x8_t a, mve_pred16_t p)
{
    return vcvtbq_m_f32_f16(inactive, a, p);
}
734
735 // CHECK-LABEL: @test_vcvttq_m_f32_f16(
736 // CHECK-NEXT: entry:
737 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
738 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
739 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.widen.predicated(<4 x float> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], i32 1, <4 x i1> [[TMP1]])
740 // CHECK-NEXT: ret <4 x float> [[TMP2]]
741 //
float32x4_t test_vcvttq_m_f32_f16(float32x4_t inactive, float16x8_t a, mve_pred16_t p)
{
    return vcvttq_m_f32_f16(inactive, a, p);
}
746
747 // CHECK-LABEL: @test_vcvtbq_x_f32_f16(
748 // CHECK-NEXT: entry:
749 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
750 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
751 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.widen.predicated(<4 x float> undef, <8 x half> [[A:%.*]], i32 0, <4 x i1> [[TMP1]])
752 // CHECK-NEXT: ret <4 x float> [[TMP2]]
753 //
test_vcvtbq_x_f32_f16(float16x8_t a,mve_pred16_t p)754 float32x4_t test_vcvtbq_x_f32_f16(float16x8_t a, mve_pred16_t p)
755 {
756 return vcvtbq_x_f32_f16(a, p);
757 }
758
759 // CHECK-LABEL: @test_vcvttq_x_f32_f16(
760 // CHECK-NEXT: entry:
761 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
762 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
763 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.widen.predicated(<4 x float> undef, <8 x half> [[A:%.*]], i32 1, <4 x i1> [[TMP1]])
764 // CHECK-NEXT: ret <4 x float> [[TMP2]]
765 //
test_vcvttq_x_f32_f16(float16x8_t a,mve_pred16_t p)766 float32x4_t test_vcvttq_x_f32_f16(float16x8_t a, mve_pred16_t p)
767 {
768 return vcvttq_x_f32_f16(a, p);
769 }
770