; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

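; vfclass.v classifies each floating-point element of the source vector: per
; the RISC-V V extension it writes a 10-bit class mask (bits 0-7: -inf,
; -normal, -subnormal, -0, +0, +subnormal, +normal, +inf; bit 8: signaling
; NaN; bit 9: quiet NaN), with exactly one bit set per element. The tests
; below exercise the @llvm.riscv.vfclass intrinsics for f16, f32, and f64
; element types at each supported LMUL; iXLen is a placeholder that the RUN
; lines rewrite to the target's XLEN integer type (i32 or i64) with sed
; before llc runs.
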
declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 1 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x half> %0,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

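; The .mask variants add a mask operand (%2) and a trailing policy immediate
; to the merge vector (%0, tied to the destination) and vl. A policy of 0 is
; LLVM's tail-undisturbed, mask-undisturbed encoding, which is why the checks
; below expect "tu, mu" in the vsetvli and a source register group (v9, v10,
; v12, or v16, depending on LMUL) that does not overlap the v8 destination.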
declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 1 x i16> %0,
  <vscale x 1 x half> %1,
  <vscale x 1 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x half>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 2 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x half> %0,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 2 x i16> %0,
  <vscale x 2 x half> %1,
  <vscale x 2 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 4 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x half> %0,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 4 x i16> %0,
  <vscale x 4 x half> %1,
  <vscale x 4 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x half>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 8 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x half> %0,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfclass.v v8, v10, v0.t
; CHECK-NEXT:    ret
  <vscale x 8 x i16> %0,
  <vscale x 8 x half> %1,
  <vscale x 8 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x half>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 16 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x half> %0,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfclass.v v8, v12, v0.t
; CHECK-NEXT:    ret
  <vscale x 16 x i16> %0,
  <vscale x 16 x half> %1,
  <vscale x 16 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x half>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 32 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x half> %0,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vfclass.v v8, v16, v0.t
; CHECK-NEXT:    ret
  <vscale x 32 x i16> %0,
  <vscale x 32 x half> %1,
  <vscale x 32 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x float>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vfclass_v_nxv1i32_nxv1f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 1 x float> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x float> %0,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 1 x i32> %0,
  <vscale x 1 x float> %1,
  <vscale x 1 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vfclass_v_nxv2i32_nxv2f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 2 x float> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x float> %0,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 2 x i32> %0,
  <vscale x 2 x float> %1,
  <vscale x 2 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x float>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vfclass_v_nxv4i32_nxv4f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 4 x float> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x float> %0,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfclass.v v8, v10, v0.t
; CHECK-NEXT:    ret
  <vscale x 4 x i32> %0,
  <vscale x 4 x float> %1,
  <vscale x 4 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x float>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vfclass_v_nxv8i32_nxv8f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 8 x float> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x float> %0,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfclass.v v8, v12, v0.t
; CHECK-NEXT:    ret
  <vscale x 8 x i32> %0,
  <vscale x 8 x float> %1,
  <vscale x 8 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x float>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vfclass_v_nxv16i32_nxv16f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 16 x float> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x float> %0,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vfclass.v v8, v16, v0.t
; CHECK-NEXT:    ret
  <vscale x 16 x i32> %0,
  <vscale x 16 x float> %1,
  <vscale x 16 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vfclass_v_nxv1i64_nxv1f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 1 x double> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x double> %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 1 x i64> %0,
  <vscale x 1 x double> %1,
  <vscale x 1 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x double>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vfclass_v_nxv2i64_nxv2f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 2 x double> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x double> %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfclass.v v8, v10, v0.t
; CHECK-NEXT:    ret
  <vscale x 2 x i64> %0,
  <vscale x 2 x double> %1,
  <vscale x 2 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x double>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vfclass_v_nxv4i64_nxv4f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 4 x double> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x double> %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfclass.v v8, v12, v0.t
; CHECK-NEXT:    ret
  <vscale x 4 x i64> %0,
  <vscale x 4 x double> %1,
  <vscale x 4 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x double>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 8 x double> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x double> %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vfclass.v v8, v16, v0.t
; CHECK-NEXT:    ret
  <vscale x 8 x i64> %0,
  <vscale x 8 x double> %1,
  <vscale x 8 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i64> %a
}