; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

; Strict (constrained) int->fp conversions from nxv1i1 masks to f16/f32/f64.
declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i1_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i1_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x float> @vsitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i1_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x float> @vuitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i1_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x double> @vsitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i1_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x double> @vuitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i1_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

; Strict (constrained) int->fp conversions from nxv2i1 masks to f16/f32/f64.
declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x half> @vsitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i1_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x half> @vuitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i1_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x float> @vsitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i1_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x float> @vuitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i1_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x double> @vsitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i1_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x double> @vuitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i1_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

; Strict (constrained) int->fp conversions from nxv4i1 masks to f16/f32/f64.
declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x half> @vsitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i1_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x half> @vuitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i1_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x float> @vsitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i1_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x float> @vuitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i1_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x double> @vsitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i1_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x double> @vuitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i1_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

; Strict (constrained) int->fp conversions from nxv8i1 masks to f16/f32/f64.
declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x half> @vsitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i1_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x half> @vuitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i1_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x float> @vsitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i1_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x float> @vuitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i1_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x double> @vsitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i1_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x double> @vuitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i1_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

; Strict (constrained) int->fp conversions from nxv16i1 masks to f16/f32.
declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
define <vscale x 16 x half> @vsitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i1_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
define <vscale x 16 x half> @vuitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i1_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
define <vscale x 16 x float> @vsitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i1_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
define <vscale x 16 x float> @vuitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i1_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}

; Strict (constrained) int->fp conversions from nxv32i1 masks to f16.
declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i1(<vscale x 32 x i1>, metadata, metadata)
define <vscale x 32 x half> @vsitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv32i1_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i1(<vscale x 32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}

declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i1(<vscale x 32 x i1>, metadata, metadata)
define <vscale x 32 x half> @vuitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv32i1_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i1(<vscale x 32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}

; Strict (constrained) int->fp conversions from nxv1i8 (and illegal nxv1i7) to f16/f32/f64.
declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i8_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i7(<vscale x 1 x i7>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i7_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vsra.vi v9, v8, 1
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i7(<vscale x 1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i7(<vscale x 1 x i7>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i7_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 127
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i7(<vscale x 1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i8_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x float> @vsitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i8_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x float> @vuitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i8_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x double> @vsitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i8_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x double> @vuitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i8_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

; Strict (constrained) int->fp conversions from nxv2i8 to f16/f32/f64.
declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x half> @vsitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i8_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x half> @vuitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i8_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x float> @vsitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i8_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x float> @vuitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i8_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x double> @vsitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i8_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsext.vf4 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x double> @vuitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i8_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf4 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

; Strict (constrained) int->fp conversions from nxv4i8 to f16/f32/f64.
declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x half> @vsitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i8_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x half> @vuitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i8_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x float> @vsitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i8_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x float> @vuitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i8_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x double> @vsitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i8_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

627declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
628define <vscale x 4 x double> @vuitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) strictfp {
629; CHECK-LABEL: vuitofp_nxv4i8_nxv4f64:
630; CHECK:       # %bb.0:
631; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
632; CHECK-NEXT:    vzext.vf4 v12, v8
633; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
634; CHECK-NEXT:    ret
635  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
636  ret <vscale x 4 x double> %evec
637}
638
; Constrained sitofp/uitofp from <vscale x 8 x i8> to f16, f32 and f64.
declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x half> @vsitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i8_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x half> @vuitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i8_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x float> @vsitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i8_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x float> @vuitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i8_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x double> @vsitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i8_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsext.vf4 v16, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x double> @vuitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i8_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf4 v16, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}
710
; Constrained sitofp/uitofp from <vscale x 16 x i8> to f16 and f32.
; (i8->f64 at this element count would exceed LMUL=8, so it is not tested here.)
declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
define <vscale x 16 x half> @vsitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i8_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
define <vscale x 16 x half> @vuitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i8_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
define <vscale x 16 x float> @vsitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i8_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vsext.vf2 v16, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
define <vscale x 16 x float> @vuitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i8_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}
758
; Constrained sitofp/uitofp from <vscale x 32 x i8> to f16 (widest legal result).
declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i8(<vscale x 32 x i8>, metadata, metadata)
define <vscale x 32 x half> @vsitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv32i8_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i8(<vscale x 32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}

declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i8(<vscale x 32 x i8>, metadata, metadata)
define <vscale x 32 x half> @vuitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv32i8_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i8(<vscale x 32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}
782
; Constrained sitofp/uitofp from <vscale x 1 x i16> to f16, f32 and f64.
; Same-width i16->f16 uses vfcvt; i16->f32 uses one vfwcvt; i16->f64 extends
; to i32 first (vsext/vzext.vf2) and then widens.
declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i16_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i16_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x float> @vsitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i16_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x float> @vuitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i16_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x double> @vsitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i16_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x double> @vuitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i16_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}
852
; Constrained sitofp/uitofp from <vscale x 2 x i16> to f16, f32 and f64.
declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x half> @vsitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i16_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x half> @vuitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i16_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x float> @vsitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x float> @vuitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x double> @vsitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x double> @vuitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}
922
; Constrained sitofp/uitofp from <vscale x 4 x i16> to f16, f32 and f64.
declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x half> @vsitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i16_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x half> @vuitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i16_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x float> @vsitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i16_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x float> @vuitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i16_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x double> @vsitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i16_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x double> @vuitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i16_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}
992
; Constrained sitofp/uitofp from <vscale x 8 x i16> to f16, f32 and f64.
declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x half> @vsitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i16_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x half> @vuitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i16_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x float> @vsitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i16_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x float> @vuitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i16_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x double> @vsitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i16_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsext.vf2 v16, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x double> @vuitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i16_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}
1062
; Constrained sitofp/uitofp from <vscale x 16 x i16> to f16 and f32.
declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
define <vscale x 16 x half> @vsitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i16_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
define <vscale x 16 x half> @vuitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i16_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
define <vscale x 16 x float> @vsitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i16_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
define <vscale x 16 x float> @vuitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i16_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}
1108
; Constrained sitofp/uitofp from <vscale x 32 x i16> to f16 (same-width, LMUL=8).
declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i16(<vscale x 32 x i16>, metadata, metadata)
define <vscale x 32 x half> @vsitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv32i16_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i16(<vscale x 32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}

declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i16(<vscale x 32 x i16>, metadata, metadata)
define <vscale x 32 x half> @vuitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv32i16_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i16(<vscale x 32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}
1130
; Constrained sitofp/uitofp from <vscale x 1 x i32> to f16, f32 and f64.
; i32->f16 narrows (vfncvt), i32->f32 converts in place (vfcvt),
; i32->f64 widens (vfwcvt).
declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i32_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i32_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x float> @vsitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i32_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x float> @vuitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i32_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x double> @vsitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i32_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x double> @vuitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i32_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}
1200
1201declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
1202define <vscale x 2 x half> @vsitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) strictfp {
1203; CHECK-LABEL: vsitofp_nxv2i32_nxv2f16:
1204; CHECK:       # %bb.0:
1205; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
1206; CHECK-NEXT:    vfncvt.f.x.w v9, v8
1207; CHECK-NEXT:    vmv1r.v v8, v9
1208; CHECK-NEXT:    ret
1209  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1210  ret <vscale x 2 x half> %evec
1211}
1212
1213declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
1214define <vscale x 2 x half> @vuitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) strictfp {
1215; CHECK-LABEL: vuitofp_nxv2i32_nxv2f16:
1216; CHECK:       # %bb.0:
1217; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
1218; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
1219; CHECK-NEXT:    vmv1r.v v8, v9
1220; CHECK-NEXT:    ret
1221  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1222  ret <vscale x 2 x half> %evec
1223}
1224
1225declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
1226define <vscale x 2 x float> @vsitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) strictfp {
1227; CHECK-LABEL: vsitofp_nxv2i32_nxv2f32:
1228; CHECK:       # %bb.0:
1229; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
1230; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1231; CHECK-NEXT:    ret
1232  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1233  ret <vscale x 2 x float> %evec
1234}
1235
1236declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
1237define <vscale x 2 x float> @vuitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) strictfp {
1238; CHECK-LABEL: vuitofp_nxv2i32_nxv2f32:
1239; CHECK:       # %bb.0:
1240; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
1241; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1242; CHECK-NEXT:    ret
1243  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1244  ret <vscale x 2 x float> %evec
1245}
1246
1247declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
1248define <vscale x 2 x double> @vsitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) strictfp {
1249; CHECK-LABEL: vsitofp_nxv2i32_nxv2f64:
1250; CHECK:       # %bb.0:
1251; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
1252; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
1253; CHECK-NEXT:    vmv2r.v v8, v10
1254; CHECK-NEXT:    ret
1255  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1256  ret <vscale x 2 x double> %evec
1257}
1258
1259declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
1260define <vscale x 2 x double> @vuitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) strictfp {
1261; CHECK-LABEL: vuitofp_nxv2i32_nxv2f64:
1262; CHECK:       # %bb.0:
1263; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
1264; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
1265; CHECK-NEXT:    vmv2r.v v8, v10
1266; CHECK-NEXT:    ret
1267  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1268  ret <vscale x 2 x double> %evec
1269}
1270
1271declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
1272define <vscale x 4 x half> @vsitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) strictfp {
1273; CHECK-LABEL: vsitofp_nxv4i32_nxv4f16:
1274; CHECK:       # %bb.0:
1275; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
1276; CHECK-NEXT:    vfncvt.f.x.w v10, v8
1277; CHECK-NEXT:    vmv.v.v v8, v10
1278; CHECK-NEXT:    ret
1279  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1280  ret <vscale x 4 x half> %evec
1281}
1282
1283declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
1284define <vscale x 4 x half> @vuitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) strictfp {
1285; CHECK-LABEL: vuitofp_nxv4i32_nxv4f16:
1286; CHECK:       # %bb.0:
1287; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
1288; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
1289; CHECK-NEXT:    vmv.v.v v8, v10
1290; CHECK-NEXT:    ret
1291  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1292  ret <vscale x 4 x half> %evec
1293}
1294
1295declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
1296define <vscale x 4 x float> @vsitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) strictfp {
1297; CHECK-LABEL: vsitofp_nxv4i32_nxv4f32:
1298; CHECK:       # %bb.0:
1299; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
1300; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1301; CHECK-NEXT:    ret
1302  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1303  ret <vscale x 4 x float> %evec
1304}
1305
1306declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
1307define <vscale x 4 x float> @vuitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) strictfp {
1308; CHECK-LABEL: vuitofp_nxv4i32_nxv4f32:
1309; CHECK:       # %bb.0:
1310; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
1311; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1312; CHECK-NEXT:    ret
1313  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1314  ret <vscale x 4 x float> %evec
1315}
1316
1317declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
1318define <vscale x 4 x double> @vsitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) strictfp {
1319; CHECK-LABEL: vsitofp_nxv4i32_nxv4f64:
1320; CHECK:       # %bb.0:
1321; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
1322; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
1323; CHECK-NEXT:    vmv4r.v v8, v12
1324; CHECK-NEXT:    ret
1325  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1326  ret <vscale x 4 x double> %evec
1327}
1328
1329declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
1330define <vscale x 4 x double> @vuitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) strictfp {
1331; CHECK-LABEL: vuitofp_nxv4i32_nxv4f64:
1332; CHECK:       # %bb.0:
1333; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
1334; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
1335; CHECK-NEXT:    vmv4r.v v8, v12
1336; CHECK-NEXT:    ret
1337  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1338  ret <vscale x 4 x double> %evec
1339}
1340
1341declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
1342define <vscale x 8 x half> @vsitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) strictfp {
1343; CHECK-LABEL: vsitofp_nxv8i32_nxv8f16:
1344; CHECK:       # %bb.0:
1345; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1346; CHECK-NEXT:    vfncvt.f.x.w v12, v8
1347; CHECK-NEXT:    vmv.v.v v8, v12
1348; CHECK-NEXT:    ret
1349  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1350  ret <vscale x 8 x half> %evec
1351}
1352
1353declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
1354define <vscale x 8 x half> @vuitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) strictfp {
1355; CHECK-LABEL: vuitofp_nxv8i32_nxv8f16:
1356; CHECK:       # %bb.0:
1357; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1358; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
1359; CHECK-NEXT:    vmv.v.v v8, v12
1360; CHECK-NEXT:    ret
1361  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1362  ret <vscale x 8 x half> %evec
1363}
1364
1365declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
1366define <vscale x 8 x float> @vsitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) strictfp {
1367; CHECK-LABEL: vsitofp_nxv8i32_nxv8f32:
1368; CHECK:       # %bb.0:
1369; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1370; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1371; CHECK-NEXT:    ret
1372  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1373  ret <vscale x 8 x float> %evec
1374}
1375
1376declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
1377define <vscale x 8 x float> @vuitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) strictfp {
1378; CHECK-LABEL: vuitofp_nxv8i32_nxv8f32:
1379; CHECK:       # %bb.0:
1380; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1381; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1382; CHECK-NEXT:    ret
1383  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1384  ret <vscale x 8 x float> %evec
1385}
1386
1387declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
1388define <vscale x 8 x double> @vsitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) strictfp {
1389; CHECK-LABEL: vsitofp_nxv8i32_nxv8f64:
1390; CHECK:       # %bb.0:
1391; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1392; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
1393; CHECK-NEXT:    vmv8r.v v8, v16
1394; CHECK-NEXT:    ret
1395  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1396  ret <vscale x 8 x double> %evec
1397}
1398
1399declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
1400define <vscale x 8 x double> @vuitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) strictfp {
1401; CHECK-LABEL: vuitofp_nxv8i32_nxv8f64:
1402; CHECK:       # %bb.0:
1403; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1404; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
1405; CHECK-NEXT:    vmv8r.v v8, v16
1406; CHECK-NEXT:    ret
1407  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1408  ret <vscale x 8 x double> %evec
1409}
1410
1411declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
1412define <vscale x 16 x half> @vsitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va) strictfp {
1413; CHECK-LABEL: vsitofp_nxv16i32_nxv16f16:
1414; CHECK:       # %bb.0:
1415; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
1416; CHECK-NEXT:    vfncvt.f.x.w v16, v8
1417; CHECK-NEXT:    vmv.v.v v8, v16
1418; CHECK-NEXT:    ret
1419  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1420  ret <vscale x 16 x half> %evec
1421}
1422
1423declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
1424define <vscale x 16 x half> @vuitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va) strictfp {
1425; CHECK-LABEL: vuitofp_nxv16i32_nxv16f16:
1426; CHECK:       # %bb.0:
1427; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
1428; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
1429; CHECK-NEXT:    vmv.v.v v8, v16
1430; CHECK-NEXT:    ret
1431  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1432  ret <vscale x 16 x half> %evec
1433}
1434
1435declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
1436define <vscale x 16 x float> @vsitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va) strictfp {
1437; CHECK-LABEL: vsitofp_nxv16i32_nxv16f32:
1438; CHECK:       # %bb.0:
1439; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
1440; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1441; CHECK-NEXT:    ret
1442  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1443  ret <vscale x 16 x float> %evec
1444}
1445
1446declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
1447define <vscale x 16 x float> @vuitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va) strictfp {
1448; CHECK-LABEL: vuitofp_nxv16i32_nxv16f32:
1449; CHECK:       # %bb.0:
1450; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
1451; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1452; CHECK-NEXT:    ret
1453  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1454  ret <vscale x 16 x float> %evec
1455}
1456
1457declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
1458define <vscale x 1 x half> @vsitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) strictfp {
1459; CHECK-LABEL: vsitofp_nxv1i64_nxv1f16:
1460; CHECK:       # %bb.0:
1461; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
1462; CHECK-NEXT:    vfncvt.f.x.w v9, v8
1463; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
1464; CHECK-NEXT:    vfncvt.f.f.w v8, v9
1465; CHECK-NEXT:    ret
1466  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1467  ret <vscale x 1 x half> %evec
1468}
1469
1470declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
1471define <vscale x 1 x half> @vuitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) strictfp {
1472; CHECK-LABEL: vuitofp_nxv1i64_nxv1f16:
1473; CHECK:       # %bb.0:
1474; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
1475; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
1476; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
1477; CHECK-NEXT:    vfncvt.f.f.w v8, v9
1478; CHECK-NEXT:    ret
1479  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1480  ret <vscale x 1 x half> %evec
1481}
1482
1483declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
1484define <vscale x 1 x float> @vsitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) strictfp {
1485; CHECK-LABEL: vsitofp_nxv1i64_nxv1f32:
1486; CHECK:       # %bb.0:
1487; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
1488; CHECK-NEXT:    vfncvt.f.x.w v9, v8
1489; CHECK-NEXT:    vmv1r.v v8, v9
1490; CHECK-NEXT:    ret
1491  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1492  ret <vscale x 1 x float> %evec
1493}
1494
1495declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
1496define <vscale x 1 x float> @vuitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) strictfp {
1497; CHECK-LABEL: vuitofp_nxv1i64_nxv1f32:
1498; CHECK:       # %bb.0:
1499; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
1500; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
1501; CHECK-NEXT:    vmv1r.v v8, v9
1502; CHECK-NEXT:    ret
1503  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1504  ret <vscale x 1 x float> %evec
1505}
1506
1507declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
1508define <vscale x 1 x double> @vsitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) strictfp {
1509; CHECK-LABEL: vsitofp_nxv1i64_nxv1f64:
1510; CHECK:       # %bb.0:
1511; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
1512; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1513; CHECK-NEXT:    ret
1514  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1515  ret <vscale x 1 x double> %evec
1516}
1517
1518declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
1519define <vscale x 1 x double> @vuitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) strictfp {
1520; CHECK-LABEL: vuitofp_nxv1i64_nxv1f64:
1521; CHECK:       # %bb.0:
1522; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
1523; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1524; CHECK-NEXT:    ret
1525  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1526  ret <vscale x 1 x double> %evec
1527}
1528
1529
1530declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
1531define <vscale x 2 x half> @vsitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) strictfp {
1532; CHECK-LABEL: vsitofp_nxv2i64_nxv2f16:
1533; CHECK:       # %bb.0:
1534; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
1535; CHECK-NEXT:    vfncvt.f.x.w v10, v8
1536; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
1537; CHECK-NEXT:    vfncvt.f.f.w v8, v10
1538; CHECK-NEXT:    ret
1539  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1540  ret <vscale x 2 x half> %evec
1541}
1542
1543declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
1544define <vscale x 2 x half> @vuitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) strictfp {
1545; CHECK-LABEL: vuitofp_nxv2i64_nxv2f16:
1546; CHECK:       # %bb.0:
1547; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
1548; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
1549; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
1550; CHECK-NEXT:    vfncvt.f.f.w v8, v10
1551; CHECK-NEXT:    ret
1552  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1553  ret <vscale x 2 x half> %evec
1554}
1555
1556declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
1557define <vscale x 2 x float> @vsitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) strictfp {
1558; CHECK-LABEL: vsitofp_nxv2i64_nxv2f32:
1559; CHECK:       # %bb.0:
1560; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
1561; CHECK-NEXT:    vfncvt.f.x.w v10, v8
1562; CHECK-NEXT:    vmv.v.v v8, v10
1563; CHECK-NEXT:    ret
1564  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1565  ret <vscale x 2 x float> %evec
1566}
1567
1568declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
1569define <vscale x 2 x float> @vuitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) strictfp {
1570; CHECK-LABEL: vuitofp_nxv2i64_nxv2f32:
1571; CHECK:       # %bb.0:
1572; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
1573; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
1574; CHECK-NEXT:    vmv.v.v v8, v10
1575; CHECK-NEXT:    ret
1576  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1577  ret <vscale x 2 x float> %evec
1578}
1579
1580declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
1581define <vscale x 2 x double> @vsitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) strictfp {
1582; CHECK-LABEL: vsitofp_nxv2i64_nxv2f64:
1583; CHECK:       # %bb.0:
1584; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
1585; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1586; CHECK-NEXT:    ret
1587  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1588  ret <vscale x 2 x double> %evec
1589}
1590
1591declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
1592define <vscale x 2 x double> @vuitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) strictfp {
1593; CHECK-LABEL: vuitofp_nxv2i64_nxv2f64:
1594; CHECK:       # %bb.0:
1595; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
1596; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1597; CHECK-NEXT:    ret
1598  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1599  ret <vscale x 2 x double> %evec
1600}
1601
1602declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
1603define <vscale x 4 x half> @vsitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) strictfp {
1604; CHECK-LABEL: vsitofp_nxv4i64_nxv4f16:
1605; CHECK:       # %bb.0:
1606; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
1607; CHECK-NEXT:    vfncvt.f.x.w v12, v8
1608; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
1609; CHECK-NEXT:    vfncvt.f.f.w v8, v12
1610; CHECK-NEXT:    ret
1611  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1612  ret <vscale x 4 x half> %evec
1613}
1614
1615declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
1616define <vscale x 4 x half> @vuitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) strictfp {
1617; CHECK-LABEL: vuitofp_nxv4i64_nxv4f16:
1618; CHECK:       # %bb.0:
1619; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
1620; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
1621; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
1622; CHECK-NEXT:    vfncvt.f.f.w v8, v12
1623; CHECK-NEXT:    ret
1624  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1625  ret <vscale x 4 x half> %evec
1626}
1627
1628declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
1629define <vscale x 4 x float> @vsitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) strictfp {
1630; CHECK-LABEL: vsitofp_nxv4i64_nxv4f32:
1631; CHECK:       # %bb.0:
1632; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
1633; CHECK-NEXT:    vfncvt.f.x.w v12, v8
1634; CHECK-NEXT:    vmv.v.v v8, v12
1635; CHECK-NEXT:    ret
1636  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1637  ret <vscale x 4 x float> %evec
1638}
1639
1640declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
1641define <vscale x 4 x float> @vuitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) strictfp {
1642; CHECK-LABEL: vuitofp_nxv4i64_nxv4f32:
1643; CHECK:       # %bb.0:
1644; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
1645; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
1646; CHECK-NEXT:    vmv.v.v v8, v12
1647; CHECK-NEXT:    ret
1648  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1649  ret <vscale x 4 x float> %evec
1650}
1651
1652declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
1653define <vscale x 4 x double> @vsitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) strictfp {
1654; CHECK-LABEL: vsitofp_nxv4i64_nxv4f64:
1655; CHECK:       # %bb.0:
1656; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
1657; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1658; CHECK-NEXT:    ret
1659  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1660  ret <vscale x 4 x double> %evec
1661}
1662
1663declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
1664define <vscale x 4 x double> @vuitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) strictfp {
1665; CHECK-LABEL: vuitofp_nxv4i64_nxv4f64:
1666; CHECK:       # %bb.0:
1667; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
1668; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1669; CHECK-NEXT:    ret
1670  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1671  ret <vscale x 4 x double> %evec
1672}
1673
1674declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
1675define <vscale x 8 x half> @vsitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) strictfp {
1676; CHECK-LABEL: vsitofp_nxv8i64_nxv8f16:
1677; CHECK:       # %bb.0:
1678; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1679; CHECK-NEXT:    vfncvt.f.x.w v16, v8
1680; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
1681; CHECK-NEXT:    vfncvt.f.f.w v8, v16
1682; CHECK-NEXT:    ret
1683  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1684  ret <vscale x 8 x half> %evec
1685}
1686
1687declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
1688define <vscale x 8 x half> @vuitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) strictfp {
1689; CHECK-LABEL: vuitofp_nxv8i64_nxv8f16:
1690; CHECK:       # %bb.0:
1691; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1692; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
1693; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
1694; CHECK-NEXT:    vfncvt.f.f.w v8, v16
1695; CHECK-NEXT:    ret
1696  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1697  ret <vscale x 8 x half> %evec
1698}
1699
1700declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
1701define <vscale x 8 x float> @vsitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) strictfp {
1702; CHECK-LABEL: vsitofp_nxv8i64_nxv8f32:
1703; CHECK:       # %bb.0:
1704; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1705; CHECK-NEXT:    vfncvt.f.x.w v16, v8
1706; CHECK-NEXT:    vmv.v.v v8, v16
1707; CHECK-NEXT:    ret
1708  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1709  ret <vscale x 8 x float> %evec
1710}
1711
1712declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
1713define <vscale x 8 x float> @vuitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) strictfp {
1714; CHECK-LABEL: vuitofp_nxv8i64_nxv8f32:
1715; CHECK:       # %bb.0:
1716; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1717; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
1718; CHECK-NEXT:    vmv.v.v v8, v16
1719; CHECK-NEXT:    ret
1720  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1721  ret <vscale x 8 x float> %evec
1722}
1723
1724declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
1725define <vscale x 8 x double> @vsitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) strictfp {
1726; CHECK-LABEL: vsitofp_nxv8i64_nxv8f64:
1727; CHECK:       # %bb.0:
1728; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
1729; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1730; CHECK-NEXT:    ret
1731  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1732  ret <vscale x 8 x double> %evec
1733}
1734
1735declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
1736define <vscale x 8 x double> @vuitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) strictfp {
1737; CHECK-LABEL: vuitofp_nxv8i64_nxv8f64:
1738; CHECK:       # %bb.0:
1739; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
1740; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1741; CHECK-NEXT:    ret
1742  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1743  ret <vscale x 8 x double> %evec
1744}
1745