; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
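
; Test the @llvm.riscv.vfwcvt.xu.f.v intrinsic (widening floating-point to
; unsigned integer convert) in unmasked and masked forms across SEW/LMUL
; combinations. A rounding-mode operand of 0 (rne) is written to frm via
; fsrmi, and the previous frm value is restored by fsrm afterwards.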

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
  <vscale x 2 x i32>,
  <vscale x 2 x half>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
  <vscale x 2 x i32>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
  <vscale x 4 x i32>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v10, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
  <vscale x 4 x i32>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
  <vscale x 8 x i32>,
  <vscale x 8 x half>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v12, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
  <vscale x 8 x i32>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
  <vscale x 16 x i32>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
  <vscale x 16 x i32>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
  <vscale x 1 x i64>,
  <vscale x 1 x float>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x float> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
  <vscale x 1 x i64>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
  <vscale x 2 x i64>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v10, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x float> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
  <vscale x 2 x i64>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
  <vscale x 4 x i64>,
  <vscale x 4 x float>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v12, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x float> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
  <vscale x 4 x i64>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
  <vscale x 8 x i64>,
  <vscale x 8 x float>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x float> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
  <vscale x 8 x i64>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}

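; With a rounding-mode operand of 1 (rtz), codegen selects the dedicated
; vfwcvt.rtz.xu.f.v encoding directly, so no fsrmi/fsrm write to frm is
; emitted.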
define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_rtz_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_rtz_nxv8i64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen 1, iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}