xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll (revision 6da5968f5ecc2a2e8b0697e335f4dec1b3bbfd01)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
3; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
4; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
5; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
6
; Strict (constrained) i1 -> half/float/double conversions for 1-element fixed
; vectors. The mask operand is first materialized as an integer vector (zero
; splat, then vmerge.vim with -1 for the signed case or 1 for the unsigned
; case), and the result is produced by a single widening convert.
7declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
8define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) strictfp {
9; CHECK-LABEL: vsitofp_v1i1_v1f16:
10; CHECK:       # %bb.0:
11; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
12; CHECK-NEXT:    vmv.s.x v8, zero
13; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
14; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
15; CHECK-NEXT:    ret
16  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
17  ret <1 x half> %evec
18}
19
20declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
21define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) strictfp {
22; CHECK-LABEL: vuitofp_v1i1_v1f16:
23; CHECK:       # %bb.0:
24; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
25; CHECK-NEXT:    vmv.s.x v8, zero
26; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
27; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
28; CHECK-NEXT:    ret
29  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
30  ret <1 x half> %evec
31}
32
33declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
34define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) strictfp {
35; CHECK-LABEL: vsitofp_v1i1_v1f32:
36; CHECK:       # %bb.0:
37; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
38; CHECK-NEXT:    vmv.s.x v8, zero
39; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
40; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
41; CHECK-NEXT:    ret
42  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
43  ret <1 x float> %evec
44}
45
46declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
47define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) strictfp {
48; CHECK-LABEL: vuitofp_v1i1_v1f32:
49; CHECK:       # %bb.0:
50; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
51; CHECK-NEXT:    vmv.s.x v8, zero
52; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
53; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
54; CHECK-NEXT:    ret
55  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
56  ret <1 x float> %evec
57}
58
59declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
60define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) strictfp {
61; CHECK-LABEL: vsitofp_v1i1_v1f64:
62; CHECK:       # %bb.0:
63; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
64; CHECK-NEXT:    vmv.s.x v8, zero
65; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
66; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
67; CHECK-NEXT:    ret
68  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
69  ret <1 x double> %evec
70}
71
72declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
73define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) strictfp {
74; CHECK-LABEL: vuitofp_v1i1_v1f64:
75; CHECK:       # %bb.0:
76; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
77; CHECK-NEXT:    vmv.s.x v8, zero
78; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
79; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
80; CHECK-NEXT:    ret
81  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
82  ret <1 x double> %evec
83}
84
; 2-element i1 -> fp conversions: same lowering shape as the 1-element cases,
; except the zero splat is emitted with vmv.v.i instead of vmv.s.x.
85declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
86define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) strictfp {
87; CHECK-LABEL: vsitofp_v2i1_v2f16:
88; CHECK:       # %bb.0:
89; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
90; CHECK-NEXT:    vmv.v.i v8, 0
91; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
92; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
93; CHECK-NEXT:    ret
94  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
95  ret <2 x half> %evec
96}
97
98declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
99define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) strictfp {
100; CHECK-LABEL: vuitofp_v2i1_v2f16:
101; CHECK:       # %bb.0:
102; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
103; CHECK-NEXT:    vmv.v.i v8, 0
104; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
105; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
106; CHECK-NEXT:    ret
107  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
108  ret <2 x half> %evec
109}
110
111declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
112define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) strictfp {
113; CHECK-LABEL: vsitofp_v2i1_v2f32:
114; CHECK:       # %bb.0:
115; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
116; CHECK-NEXT:    vmv.v.i v8, 0
117; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
118; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
119; CHECK-NEXT:    ret
120  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
121  ret <2 x float> %evec
122}
123
124declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
125define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) strictfp {
126; CHECK-LABEL: vuitofp_v2i1_v2f32:
127; CHECK:       # %bb.0:
128; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
129; CHECK-NEXT:    vmv.v.i v8, 0
130; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
131; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
132; CHECK-NEXT:    ret
133  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
134  ret <2 x float> %evec
135}
136
137declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
138define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) strictfp {
139; CHECK-LABEL: vsitofp_v2i1_v2f64:
140; CHECK:       # %bb.0:
141; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
142; CHECK-NEXT:    vmv.v.i v8, 0
143; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
144; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
145; CHECK-NEXT:    ret
146  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
147  ret <2 x double> %evec
148}
149
150declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
151define <2 x double> @vuitofp_v2i1_v2f64(<2 x i1> %va) strictfp {
152; CHECK-LABEL: vuitofp_v2i1_v2f64:
153; CHECK:       # %bb.0:
154; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
155; CHECK-NEXT:    vmv.v.i v8, 0
156; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
157; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
158; CHECK-NEXT:    ret
159  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
160  ret <2 x double> %evec
161}
162
; 4-element i1 -> fp conversions. The f64 cases need an m1 source / m2 result
; group, so the merged integer vector lands in v10 rather than v9.
163declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
164define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) strictfp {
165; CHECK-LABEL: vsitofp_v4i1_v4f16:
166; CHECK:       # %bb.0:
167; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
168; CHECK-NEXT:    vmv.v.i v8, 0
169; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
170; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
171; CHECK-NEXT:    ret
172  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
173  ret <4 x half> %evec
174}
175
176declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
177define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) strictfp {
178; CHECK-LABEL: vuitofp_v4i1_v4f16:
179; CHECK:       # %bb.0:
180; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
181; CHECK-NEXT:    vmv.v.i v8, 0
182; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
183; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
184; CHECK-NEXT:    ret
185  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
186  ret <4 x half> %evec
187}
188
189declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
190define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) strictfp {
191; CHECK-LABEL: vsitofp_v4i1_v4f32:
192; CHECK:       # %bb.0:
193; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
194; CHECK-NEXT:    vmv.v.i v8, 0
195; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
196; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
197; CHECK-NEXT:    ret
198  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
199  ret <4 x float> %evec
200}
201
202declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
203define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) strictfp {
204; CHECK-LABEL: vuitofp_v4i1_v4f32:
205; CHECK:       # %bb.0:
206; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
207; CHECK-NEXT:    vmv.v.i v8, 0
208; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
209; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
210; CHECK-NEXT:    ret
211  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
212  ret <4 x float> %evec
213}
214
215declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
216define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) strictfp {
217; CHECK-LABEL: vsitofp_v4i1_v4f64:
218; CHECK:       # %bb.0:
219; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
220; CHECK-NEXT:    vmv.v.i v8, 0
221; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
222; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
223; CHECK-NEXT:    ret
224  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
225  ret <4 x double> %evec
226}
227
228declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
229define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) strictfp {
230; CHECK-LABEL: vuitofp_v4i1_v4f64:
231; CHECK:       # %bb.0:
232; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
233; CHECK-NEXT:    vmv.v.i v8, 0
234; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
235; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
236; CHECK-NEXT:    ret
237  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
238  ret <4 x double> %evec
239}
240
; 8-element i1 -> fp conversions; register grouping scales with element size
; (f32 sources use m1/v10, f64 sources use m2/v12 for the merged vector).
241declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
242define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) strictfp {
243; CHECK-LABEL: vsitofp_v8i1_v8f16:
244; CHECK:       # %bb.0:
245; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
246; CHECK-NEXT:    vmv.v.i v8, 0
247; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
248; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
249; CHECK-NEXT:    ret
250  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
251  ret <8 x half> %evec
252}
253
254declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
255define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) strictfp {
256; CHECK-LABEL: vuitofp_v8i1_v8f16:
257; CHECK:       # %bb.0:
258; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
259; CHECK-NEXT:    vmv.v.i v8, 0
260; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
261; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
262; CHECK-NEXT:    ret
263  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
264  ret <8 x half> %evec
265}
266
267declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
268define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) strictfp {
269; CHECK-LABEL: vsitofp_v8i1_v8f32:
270; CHECK:       # %bb.0:
271; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
272; CHECK-NEXT:    vmv.v.i v8, 0
273; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
274; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
275; CHECK-NEXT:    ret
276  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
277  ret <8 x float> %evec
278}
279
280declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
281define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) strictfp {
282; CHECK-LABEL: vuitofp_v8i1_v8f32:
283; CHECK:       # %bb.0:
284; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
285; CHECK-NEXT:    vmv.v.i v8, 0
286; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
287; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
288; CHECK-NEXT:    ret
289  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
290  ret <8 x float> %evec
291}
292
293declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
294define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) strictfp {
295; CHECK-LABEL: vsitofp_v8i1_v8f64:
296; CHECK:       # %bb.0:
297; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
298; CHECK-NEXT:    vmv.v.i v8, 0
299; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
300; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
301; CHECK-NEXT:    ret
302  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
303  ret <8 x double> %evec
304}
305
306declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
307define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) strictfp {
308; CHECK-LABEL: vuitofp_v8i1_v8f64:
309; CHECK:       # %bb.0:
310; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
311; CHECK-NEXT:    vmv.v.i v8, 0
312; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
313; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
314; CHECK-NEXT:    ret
315  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
316  ret <8 x double> %evec
317}
318
; 16-element i1 -> f16/f32 conversions (no f64 variants at this width in this
; file); merged integer vector uses m1/v10 for f16 and m2/v12 for f32 results.
319declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
320define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) strictfp {
321; CHECK-LABEL: vsitofp_v16i1_v16f16:
322; CHECK:       # %bb.0:
323; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
324; CHECK-NEXT:    vmv.v.i v8, 0
325; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
326; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
327; CHECK-NEXT:    ret
328  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
329  ret <16 x half> %evec
330}
331
332declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
333define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) strictfp {
334; CHECK-LABEL: vuitofp_v16i1_v16f16:
335; CHECK:       # %bb.0:
336; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
337; CHECK-NEXT:    vmv.v.i v8, 0
338; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
339; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
340; CHECK-NEXT:    ret
341  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
342  ret <16 x half> %evec
343}
344
345declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
346define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) strictfp {
347; CHECK-LABEL: vsitofp_v16i1_v16f32:
348; CHECK:       # %bb.0:
349; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
350; CHECK-NEXT:    vmv.v.i v8, 0
351; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
352; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
353; CHECK-NEXT:    ret
354  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
355  ret <16 x float> %evec
356}
357
358declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
359define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) strictfp {
360; CHECK-LABEL: vuitofp_v16i1_v16f32:
361; CHECK:       # %bb.0:
362; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
363; CHECK-NEXT:    vmv.v.i v8, 0
364; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
365; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
366; CHECK-NEXT:    ret
367  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
368  ret <16 x float> %evec
369}
370
; 32-element i1 -> f16: VL 32 exceeds vsetivli's 5-bit immediate, so the
; length is materialized in a0 and vsetvli is used instead.
371declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
372define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) strictfp {
373; CHECK-LABEL: vsitofp_v32i1_v32f16:
374; CHECK:       # %bb.0:
375; CHECK-NEXT:    li a0, 32
376; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
377; CHECK-NEXT:    vmv.v.i v8, 0
378; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
379; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
380; CHECK-NEXT:    ret
381  %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
382  ret <32 x half> %evec
383}
384
385declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
386define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) strictfp {
387; CHECK-LABEL: vuitofp_v32i1_v32f16:
388; CHECK:       # %bb.0:
389; CHECK-NEXT:    li a0, 32
390; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
391; CHECK-NEXT:    vmv.v.i v8, 0
392; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
393; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
394; CHECK-NEXT:    ret
395  %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
396  ret <32 x half> %evec
397}
398
; i8 -> f16 1-element conversions widen directly with vfwcvt. The illegal-type
; v1i7 cases are scalarized instead: the element is sign-extended (slli+srai,
; shift amount differing between rv32 and rv64) or masked (andi 127), converted
; with scalar fcvt.h.w(u), and inserted back with vfmv.s.f.
399declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
400define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) strictfp {
401; CHECK-LABEL: vsitofp_v1i8_v1f16:
402; CHECK:       # %bb.0:
403; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
404; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
405; CHECK-NEXT:    vmv1r.v v8, v9
406; CHECK-NEXT:    ret
407  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
408  ret <1 x half> %evec
409}
410
411declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
412define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
413; RV32-LABEL: vsitofp_v1i7_v1f16:
414; RV32:       # %bb.0:
415; RV32-NEXT:    slli a0, a0, 25
416; RV32-NEXT:    srai a0, a0, 25
417; RV32-NEXT:    fcvt.h.w fa5, a0
418; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
419; RV32-NEXT:    vfmv.s.f v8, fa5
420; RV32-NEXT:    ret
421;
422; RV64-LABEL: vsitofp_v1i7_v1f16:
423; RV64:       # %bb.0:
424; RV64-NEXT:    slli a0, a0, 57
425; RV64-NEXT:    srai a0, a0, 57
426; RV64-NEXT:    fcvt.h.w fa5, a0
427; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
428; RV64-NEXT:    vfmv.s.f v8, fa5
429; RV64-NEXT:    ret
430  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
431  ret <1 x half> %evec
432}
433
434declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
435define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
436; CHECK-LABEL: vuitofp_v1i7_v1f16:
437; CHECK:       # %bb.0:
438; CHECK-NEXT:    andi a0, a0, 127
439; CHECK-NEXT:    fcvt.h.wu fa5, a0
440; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
441; CHECK-NEXT:    vfmv.s.f v8, fa5
442; CHECK-NEXT:    ret
443  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
444  ret <1 x half> %evec
445}
446
447declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
448define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) strictfp {
449; CHECK-LABEL: vuitofp_v1i8_v1f16:
450; CHECK:       # %bb.0:
451; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
452; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
453; CHECK-NEXT:    vmv1r.v v8, v9
454; CHECK-NEXT:    ret
455  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
456  ret <1 x half> %evec
457}
458
; i8 -> f32/f64 (1 element): the widening convert only doubles the element
; width, so the source is first extended (vsext/vzext .vf2 for f32, .vf4 for
; f64) and then converted once.
459declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
460define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) strictfp {
461; CHECK-LABEL: vsitofp_v1i8_v1f32:
462; CHECK:       # %bb.0:
463; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
464; CHECK-NEXT:    vsext.vf2 v9, v8
465; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
466; CHECK-NEXT:    ret
467  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
468  ret <1 x float> %evec
469}
470
471declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
472define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) strictfp {
473; CHECK-LABEL: vuitofp_v1i8_v1f32:
474; CHECK:       # %bb.0:
475; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
476; CHECK-NEXT:    vzext.vf2 v9, v8
477; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
478; CHECK-NEXT:    ret
479  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
480  ret <1 x float> %evec
481}
482
483declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
484define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) strictfp {
485; CHECK-LABEL: vsitofp_v1i8_v1f64:
486; CHECK:       # %bb.0:
487; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
488; CHECK-NEXT:    vsext.vf4 v9, v8
489; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
490; CHECK-NEXT:    ret
491  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
492  ret <1 x double> %evec
493}
494
495declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
496define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) strictfp {
497; CHECK-LABEL: vuitofp_v1i8_v1f64:
498; CHECK:       # %bb.0:
499; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
500; CHECK-NEXT:    vzext.vf4 v9, v8
501; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
502; CHECK-NEXT:    ret
503  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
504  ret <1 x double> %evec
505}
506
; 2-element i8 -> f16/f32/f64 conversions: direct widening convert for f16,
; extend-then-convert (vf2 for f32, vf4 for f64) otherwise.
507declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
508define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) strictfp {
509; CHECK-LABEL: vsitofp_v2i8_v2f16:
510; CHECK:       # %bb.0:
511; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
512; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
513; CHECK-NEXT:    vmv1r.v v8, v9
514; CHECK-NEXT:    ret
515  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
516  ret <2 x half> %evec
517}
518
519declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
520define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) strictfp {
521; CHECK-LABEL: vuitofp_v2i8_v2f16:
522; CHECK:       # %bb.0:
523; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
524; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
525; CHECK-NEXT:    vmv1r.v v8, v9
526; CHECK-NEXT:    ret
527  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
528  ret <2 x half> %evec
529}
530
531declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
532define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) strictfp {
533; CHECK-LABEL: vsitofp_v2i8_v2f32:
534; CHECK:       # %bb.0:
535; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
536; CHECK-NEXT:    vsext.vf2 v9, v8
537; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
538; CHECK-NEXT:    ret
539  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
540  ret <2 x float> %evec
541}
542
543declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
544define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> %va) strictfp {
545; CHECK-LABEL: vuitofp_v2i8_v2f32:
546; CHECK:       # %bb.0:
547; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
548; CHECK-NEXT:    vzext.vf2 v9, v8
549; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
550; CHECK-NEXT:    ret
551  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
552  ret <2 x float> %evec
553}
554
555declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
556define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) strictfp {
557; CHECK-LABEL: vsitofp_v2i8_v2f64:
558; CHECK:       # %bb.0:
559; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
560; CHECK-NEXT:    vsext.vf4 v9, v8
561; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
562; CHECK-NEXT:    ret
563  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
564  ret <2 x double> %evec
565}
566
567declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
568define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) strictfp {
569; CHECK-LABEL: vuitofp_v2i8_v2f64:
570; CHECK:       # %bb.0:
571; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
572; CHECK-NEXT:    vzext.vf4 v9, v8
573; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
574; CHECK-NEXT:    ret
575  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
576  ret <2 x double> %evec
577}
578
; 4-element i8 -> f16/f32/f64 conversions; the f64 cases extend into a v10
; register group (m1 source for an m2 result).
579declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
580define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) strictfp {
581; CHECK-LABEL: vsitofp_v4i8_v4f16:
582; CHECK:       # %bb.0:
583; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
584; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
585; CHECK-NEXT:    vmv1r.v v8, v9
586; CHECK-NEXT:    ret
587  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
588  ret <4 x half> %evec
589}
590
591declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
592define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) strictfp {
593; CHECK-LABEL: vuitofp_v4i8_v4f16:
594; CHECK:       # %bb.0:
595; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
596; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
597; CHECK-NEXT:    vmv1r.v v8, v9
598; CHECK-NEXT:    ret
599  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
600  ret <4 x half> %evec
601}
602
603declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
604define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) strictfp {
605; CHECK-LABEL: vsitofp_v4i8_v4f32:
606; CHECK:       # %bb.0:
607; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
608; CHECK-NEXT:    vsext.vf2 v9, v8
609; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
610; CHECK-NEXT:    ret
611  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
612  ret <4 x float> %evec
613}
614
615declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
616define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) strictfp {
617; CHECK-LABEL: vuitofp_v4i8_v4f32:
618; CHECK:       # %bb.0:
619; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
620; CHECK-NEXT:    vzext.vf2 v9, v8
621; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
622; CHECK-NEXT:    ret
623  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
624  ret <4 x float> %evec
625}
626
627declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
628define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) strictfp {
629; CHECK-LABEL: vsitofp_v4i8_v4f64:
630; CHECK:       # %bb.0:
631; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
632; CHECK-NEXT:    vsext.vf4 v10, v8
633; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
634; CHECK-NEXT:    ret
635  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
636  ret <4 x double> %evec
637}
638
639declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
640define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) strictfp {
641; CHECK-LABEL: vuitofp_v4i8_v4f64:
642; CHECK:       # %bb.0:
643; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
644; CHECK-NEXT:    vzext.vf4 v10, v8
645; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
646; CHECK-NEXT:    ret
647  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
648  ret <4 x double> %evec
649}
650
651declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
652define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) strictfp {
653; CHECK-LABEL: vsitofp_v8i8_v8f16:
654; CHECK:       # %bb.0:
655; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
656; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
657; CHECK-NEXT:    vmv1r.v v8, v9
658; CHECK-NEXT:    ret
659  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
660  ret <8 x half> %evec
661}
662
declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
; Unsigned variant of the test above: same single widening convert, using vfwcvt.f.xu.v.
define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}
674
declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
; s8 -> f32 is a 4x widening: vsext.vf2 to e16, then one widening FP convert to e32.
define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}
686
declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
; Unsigned variant: vzext.vf2 to e16, then widening vfwcvt.f.xu.v to e32.
define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}
698
declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
; s8 -> f64 is an 8x widening: vsext.vf4 to e32 (m2 group), then one widening FP convert to e64.
define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}
710
declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
; Unsigned variant: vzext.vf4 to e32, then widening vfwcvt.f.xu.v to e64.
define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}
722
declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
; s8 -> f16 at 16 elements: single widening convert into the m2 group v10, then copied back to v8.
define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i8_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}
734
declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
; Unsigned variant of the test above: widening vfwcvt.f.xu.v into v10, copied back to v8.
define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i8_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}
746
declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
; s8 -> f32 at 16 elements: vsext.vf2 to e16 (m2), then one widening FP convert to e32.
define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i8_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}
758
declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
; Unsigned variant: vzext.vf2 to e16, then widening vfwcvt.f.xu.v to e32.
define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i8_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}
770
declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
; VL=32 exceeds vsetivli's 5-bit immediate (max 31), so VL is materialized in a0 and vsetvli is used; then a single widening convert into the m4 group v12.
define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v32i8_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}
783
declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
; Unsigned variant of the VL=32 test: same li+vsetvli sequence with vfwcvt.f.xu.v.
define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v32i8_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}
796
declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
; i16 -> f16 is a same-EEW convert: a single in-place vfcvt.f.x.v, no extend or move needed.
define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i16_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}
807
declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
; Unsigned same-EEW convert: single in-place vfcvt.f.xu.v.
define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i16_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}
818
declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
; s16 -> f32: single widening convert; dest cannot alias source, so result lands in v9 then is copied to v8.
define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i16_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}
830
declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
; Unsigned variant: widening vfwcvt.f.xu.v into v9, copied back to v8.
define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i16_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}
842
declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
; s16 -> f64 is a 4x widening: vsext.vf2 to e32, then one widening FP convert to e64.
define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i16_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}
854
declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
; Unsigned variant: vzext.vf2 to e32, then widening vfwcvt.f.xu.v to e64.
define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i16_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}
866
declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
; Same-EEW convert: single in-place vfcvt.f.x.v at VL=2.
define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i16_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}
877
declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
; Unsigned same-EEW convert: single in-place vfcvt.f.xu.v.
define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i16_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}
888
declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
; s16 -> f32: single widening convert into v9, copied back to v8.
define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i16_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}
900
declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
; Unsigned variant: widening vfwcvt.f.xu.v into v9, copied back to v8.
define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i16_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}
912
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
; s16 -> f64: vsext.vf2 to e32, then one widening FP convert to e64.
define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i16_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}
924
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
; Unsigned variant: vzext.vf2 to e32, then widening vfwcvt.f.xu.v to e64.
define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i16_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}
936
declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
; Same-EEW convert: single in-place vfcvt.f.x.v at VL=4.
define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i16_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}
947
declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
; Unsigned same-EEW convert: single in-place vfcvt.f.xu.v.
define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i16_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}
958
declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
; s16 -> f32: single widening convert into v9, copied back to v8.
define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i16_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}
970
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
; Unsigned variant: widening vfwcvt.f.xu.v into v9, copied back to v8.
define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i16_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}
982
declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
; s16 -> f64: vsext.vf2 to e32 (m1), then one widening FP convert to e64.
define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i16_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}
994
declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
; Unsigned variant: vzext.vf2 to e32, then widening vfwcvt.f.xu.v to e64.
define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i16_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}
1006
declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
; Same-EEW convert: single in-place vfcvt.f.x.v at VL=8, e16/m1.
define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i16_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}
1017
declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
; Unsigned same-EEW convert: single in-place vfcvt.f.xu.v.
define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i16_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}
1028
declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
; s16 -> f32: single widening convert into the m2 group v10, copied back to v8.
define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i16_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}
1040
declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
; Unsigned variant: widening vfwcvt.f.xu.v into v10, copied back to v8.
define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i16_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}
1052
declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
; s16 -> f64: vsext.vf2 to e32 (m2), then one widening FP convert to e64.
define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i16_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}
1064
declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
; Unsigned variant: vzext.vf2 to e32, then widening vfwcvt.f.xu.v to e64.
define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i16_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}
1076
declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
; Same-EEW convert at m2: single in-place vfcvt.f.x.v.
define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i16_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}
1087
declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
; Unsigned same-EEW convert at m2: single in-place vfcvt.f.xu.v.
define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i16_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}
1098
declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
; s16 -> f32 at 16 elements: single widening convert into the m4 group v12, copied back to v8.
define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i16_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}
1110
declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
; Unsigned variant: widening vfwcvt.f.xu.v into v12, copied back to v8.
define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i16_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}
1122
declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
; VL=32 exceeds the vsetivli immediate range, so VL goes through a0; then a same-EEW in-place vfcvt.f.x.v at m4.
define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v32i16_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}
1134
declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
; Unsigned variant of the VL=32 same-EEW test: li+vsetvli then in-place vfcvt.f.xu.v.
define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v32i16_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}
1146
declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
; s32 -> f16 narrows EEW: one narrowing convert vfncvt.f.x.w (e32 -> e16) into v9, copied back to v8.
define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i32_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}
1158
declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
; Unsigned narrowing variant: vfncvt.f.xu.w (e32 -> e16) into v9, copied back to v8.
define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i32_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}
1170
declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
; Same-EEW convert: single in-place vfcvt.f.x.v at e32.
define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i32_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}
1181
declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
; Unsigned same-EEW convert: single in-place vfcvt.f.xu.v at e32.
define <1 x float> @vuitofp_v1i32_v1f32(<1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i32_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}
1192
declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
; s32 -> f64: single widening convert into v9, copied back to v8.
define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i32_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}
1204
declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
; Unsigned variant: widening vfwcvt.f.xu.v into v9, copied back to v8.
define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i32_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}
1216
declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
; Narrowing convert: vfncvt.f.x.w (e32 -> e16) into v9, copied back to v8.
define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i32_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}
1228
declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
; Unsigned narrowing variant: vfncvt.f.xu.w into v9, copied back to v8.
define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i32_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}
1240
declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
; Same-EEW convert: single in-place vfcvt.f.x.v at VL=2, e32.
define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i32_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}
1251
declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
; Unsigned same-EEW convert: single in-place vfcvt.f.xu.v.
define <2 x float> @vuitofp_v2i32_v2f32(<2 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i32_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}
1262
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
; s32 -> f64: single widening convert into v9, copied back to v8.
define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i32_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}
1274
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
; Unsigned variant: widening vfwcvt.f.xu.v into v9, copied back to v8.
define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i32_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}
1286
declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
; Narrowing convert: vfncvt.f.x.w (e32 -> e16) into v9, copied back to v8.
define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i32_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}
1298
declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
; Unsigned narrowing variant: vfncvt.f.xu.w into v9, copied back to v8.
define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i32_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}
1310
declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
; Same-EEW convert: single in-place vfcvt.f.x.v at e32/m1.
define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i32_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}
1321
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
; Unsigned same-EEW convert: single in-place vfcvt.f.xu.v at e32/m1.
define <4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i32_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}
1332
1333declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
1334define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) strictfp {
1335; CHECK-LABEL: vsitofp_v4i32_v4f64:
1336; CHECK:       # %bb.0:
1337; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1338; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
1339; CHECK-NEXT:    vmv2r.v v8, v10
1340; CHECK-NEXT:    ret
1341  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1342  ret <4 x double> %evec
1343}
1344
1345declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
1346define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) strictfp {
1347; CHECK-LABEL: vuitofp_v4i32_v4f64:
1348; CHECK:       # %bb.0:
1349; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1350; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
1351; CHECK-NEXT:    vmv2r.v v8, v10
1352; CHECK-NEXT:    ret
1353  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1354  ret <4 x double> %evec
1355}
1356
1357declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
1358define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) strictfp {
1359; CHECK-LABEL: vsitofp_v8i32_v8f16:
1360; CHECK:       # %bb.0:
1361; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
1362; CHECK-NEXT:    vfncvt.f.x.w v10, v8
1363; CHECK-NEXT:    vmv.v.v v8, v10
1364; CHECK-NEXT:    ret
1365  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1366  ret <8 x half> %evec
1367}
1368
1369declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
1370define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) strictfp {
1371; CHECK-LABEL: vuitofp_v8i32_v8f16:
1372; CHECK:       # %bb.0:
1373; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
1374; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
1375; CHECK-NEXT:    vmv.v.v v8, v10
1376; CHECK-NEXT:    ret
1377  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1378  ret <8 x half> %evec
1379}
1380
1381declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
1382define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) strictfp {
1383; CHECK-LABEL: vsitofp_v8i32_v8f32:
1384; CHECK:       # %bb.0:
1385; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1386; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1387; CHECK-NEXT:    ret
1388  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1389  ret <8 x float> %evec
1390}
1391
1392declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
1393define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) strictfp {
1394; CHECK-LABEL: vuitofp_v8i32_v8f32:
1395; CHECK:       # %bb.0:
1396; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1397; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1398; CHECK-NEXT:    ret
1399  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1400  ret <8 x float> %evec
1401}
1402
1403declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
1404define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) strictfp {
1405; CHECK-LABEL: vsitofp_v8i32_v8f64:
1406; CHECK:       # %bb.0:
1407; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1408; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
1409; CHECK-NEXT:    vmv4r.v v8, v12
1410; CHECK-NEXT:    ret
1411  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1412  ret <8 x double> %evec
1413}
1414
1415declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
1416define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) strictfp {
1417; CHECK-LABEL: vuitofp_v8i32_v8f64:
1418; CHECK:       # %bb.0:
1419; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1420; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
1421; CHECK-NEXT:    vmv4r.v v8, v12
1422; CHECK-NEXT:    ret
1423  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1424  ret <8 x double> %evec
1425}
1426
1427declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
1428define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) strictfp {
1429; CHECK-LABEL: vsitofp_v16i32_v16f16:
1430; CHECK:       # %bb.0:
1431; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
1432; CHECK-NEXT:    vfncvt.f.x.w v12, v8
1433; CHECK-NEXT:    vmv.v.v v8, v12
1434; CHECK-NEXT:    ret
1435  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1436  ret <16 x half> %evec
1437}
1438
1439declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
1440define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) strictfp {
1441; CHECK-LABEL: vuitofp_v16i32_v16f16:
1442; CHECK:       # %bb.0:
1443; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
1444; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
1445; CHECK-NEXT:    vmv.v.v v8, v12
1446; CHECK-NEXT:    ret
1447  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1448  ret <16 x half> %evec
1449}
1450
1451declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
1452define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) strictfp {
1453; CHECK-LABEL: vsitofp_v16i32_v16f32:
1454; CHECK:       # %bb.0:
1455; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
1456; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1457; CHECK-NEXT:    ret
1458  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1459  ret <16 x float> %evec
1460}
1461
1462declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
1463define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) strictfp {
1464; CHECK-LABEL: vuitofp_v16i32_v16f32:
1465; CHECK:       # %bb.0:
1466; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
1467; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1468; CHECK-NEXT:    ret
1469  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1470  ret <16 x float> %evec
1471}
1472
; Strict-FP (constrained) sitofp/uitofp from <1 x i64>.  i64->f16 has no
; single-step narrowing conversion, so it lowers to two steps: i64->f32 via
; vfncvt.f.x[u].w, then f32->f16 via vfncvt.f.f.w.  i64->f32 is a single
; vfncvt; same-width i64->f64 is a single vfcvt.  CHECK lines are
; autogenerated by update_llc_test_checks.py.
1473declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
1474define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) strictfp {
1475; CHECK-LABEL: vsitofp_v1i64_v1f16:
1476; CHECK:       # %bb.0:
1477; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
1478; CHECK-NEXT:    vfncvt.f.x.w v9, v8
1479; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
1480; CHECK-NEXT:    vfncvt.f.f.w v8, v9
1481; CHECK-NEXT:    ret
1482  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1483  ret <1 x half> %evec
1484}
1485
1486declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
1487define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) strictfp {
1488; CHECK-LABEL: vuitofp_v1i64_v1f16:
1489; CHECK:       # %bb.0:
1490; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
1491; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
1492; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
1493; CHECK-NEXT:    vfncvt.f.f.w v8, v9
1494; CHECK-NEXT:    ret
1495  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1496  ret <1 x half> %evec
1497}
1498
1499declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
1500define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) strictfp {
1501; CHECK-LABEL: vsitofp_v1i64_v1f32:
1502; CHECK:       # %bb.0:
1503; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
1504; CHECK-NEXT:    vfncvt.f.x.w v9, v8
1505; CHECK-NEXT:    vmv1r.v v8, v9
1506; CHECK-NEXT:    ret
1507  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1508  ret <1 x float> %evec
1509}
1510
1511declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
1512define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) strictfp {
1513; CHECK-LABEL: vuitofp_v1i64_v1f32:
1514; CHECK:       # %bb.0:
1515; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
1516; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
1517; CHECK-NEXT:    vmv1r.v v8, v9
1518; CHECK-NEXT:    ret
1519  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1520  ret <1 x float> %evec
1521}
1522
1523declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
1524define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) strictfp {
1525; CHECK-LABEL: vsitofp_v1i64_v1f64:
1526; CHECK:       # %bb.0:
1527; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
1528; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1529; CHECK-NEXT:    ret
1530  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1531  ret <1 x double> %evec
1532}
1533
1534declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
1535define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) strictfp {
1536; CHECK-LABEL: vuitofp_v1i64_v1f64:
1537; CHECK:       # %bb.0:
1538; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
1539; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1540; CHECK-NEXT:    ret
1541  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1542  ret <1 x double> %evec
1543}
1544
1545
; Strict-FP (constrained) sitofp/uitofp from <2/4/8 x i64> vectors.  As with
; the v1i64 cases, i64->f16 takes two narrowing steps, i64->f32 one vfncvt,
; and i64->f64 a same-width vfcvt; the larger element counts exercise m1/m2/m4
; source register groups.  CHECK lines are autogenerated by
; update_llc_test_checks.py -- regenerate rather than hand-editing them.
1546declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
1547define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) strictfp {
1548; CHECK-LABEL: vsitofp_v2i64_v2f16:
1549; CHECK:       # %bb.0:
1550; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1551; CHECK-NEXT:    vfncvt.f.x.w v9, v8
1552; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
1553; CHECK-NEXT:    vfncvt.f.f.w v8, v9
1554; CHECK-NEXT:    ret
1555  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1556  ret <2 x half> %evec
1557}
1558
1559declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
1560define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) strictfp {
1561; CHECK-LABEL: vuitofp_v2i64_v2f16:
1562; CHECK:       # %bb.0:
1563; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1564; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
1565; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
1566; CHECK-NEXT:    vfncvt.f.f.w v8, v9
1567; CHECK-NEXT:    ret
1568  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1569  ret <2 x half> %evec
1570}
1571
1572declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
1573define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) strictfp {
1574; CHECK-LABEL: vsitofp_v2i64_v2f32:
1575; CHECK:       # %bb.0:
1576; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1577; CHECK-NEXT:    vfncvt.f.x.w v9, v8
1578; CHECK-NEXT:    vmv1r.v v8, v9
1579; CHECK-NEXT:    ret
1580  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1581  ret <2 x float> %evec
1582}
1583
1584declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
1585define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) strictfp {
1586; CHECK-LABEL: vuitofp_v2i64_v2f32:
1587; CHECK:       # %bb.0:
1588; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1589; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
1590; CHECK-NEXT:    vmv1r.v v8, v9
1591; CHECK-NEXT:    ret
1592  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1593  ret <2 x float> %evec
1594}
1595
1596declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
1597define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) strictfp {
1598; CHECK-LABEL: vsitofp_v2i64_v2f64:
1599; CHECK:       # %bb.0:
1600; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
1601; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1602; CHECK-NEXT:    ret
1603  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1604  ret <2 x double> %evec
1605}
1606
1607declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
1608define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) strictfp {
1609; CHECK-LABEL: vuitofp_v2i64_v2f64:
1610; CHECK:       # %bb.0:
1611; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
1612; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1613; CHECK-NEXT:    ret
1614  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1615  ret <2 x double> %evec
1616}
1617
1618declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
1619define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) strictfp {
1620; CHECK-LABEL: vsitofp_v4i64_v4f16:
1621; CHECK:       # %bb.0:
1622; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1623; CHECK-NEXT:    vfncvt.f.x.w v10, v8
1624; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
1625; CHECK-NEXT:    vfncvt.f.f.w v8, v10
1626; CHECK-NEXT:    ret
1627  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1628  ret <4 x half> %evec
1629}
1630
1631declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
1632define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) strictfp {
1633; CHECK-LABEL: vuitofp_v4i64_v4f16:
1634; CHECK:       # %bb.0:
1635; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1636; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
1637; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
1638; CHECK-NEXT:    vfncvt.f.f.w v8, v10
1639; CHECK-NEXT:    ret
1640  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1641  ret <4 x half> %evec
1642}
1643
1644declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
1645define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) strictfp {
1646; CHECK-LABEL: vsitofp_v4i64_v4f32:
1647; CHECK:       # %bb.0:
1648; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1649; CHECK-NEXT:    vfncvt.f.x.w v10, v8
1650; CHECK-NEXT:    vmv.v.v v8, v10
1651; CHECK-NEXT:    ret
1652  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1653  ret <4 x float> %evec
1654}
1655
1656declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
1657define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) strictfp {
1658; CHECK-LABEL: vuitofp_v4i64_v4f32:
1659; CHECK:       # %bb.0:
1660; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1661; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
1662; CHECK-NEXT:    vmv.v.v v8, v10
1663; CHECK-NEXT:    ret
1664  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1665  ret <4 x float> %evec
1666}
1667
1668declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
1669define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) strictfp {
1670; CHECK-LABEL: vsitofp_v4i64_v4f64:
1671; CHECK:       # %bb.0:
1672; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
1673; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1674; CHECK-NEXT:    ret
1675  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1676  ret <4 x double> %evec
1677}
1678
1679declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
1680define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> %va) strictfp {
1681; CHECK-LABEL: vuitofp_v4i64_v4f64:
1682; CHECK:       # %bb.0:
1683; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
1684; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1685; CHECK-NEXT:    ret
1686  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1687  ret <4 x double> %evec
1688}
1689
1690declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
1691define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) strictfp {
1692; CHECK-LABEL: vsitofp_v8i64_v8f16:
1693; CHECK:       # %bb.0:
1694; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1695; CHECK-NEXT:    vfncvt.f.x.w v12, v8
1696; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
1697; CHECK-NEXT:    vfncvt.f.f.w v8, v12
1698; CHECK-NEXT:    ret
1699  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1700  ret <8 x half> %evec
1701}
1702
1703declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
1704define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) strictfp {
1705; CHECK-LABEL: vuitofp_v8i64_v8f16:
1706; CHECK:       # %bb.0:
1707; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1708; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
1709; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
1710; CHECK-NEXT:    vfncvt.f.f.w v8, v12
1711; CHECK-NEXT:    ret
1712  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1713  ret <8 x half> %evec
1714}
1715
1716declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
1717define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) strictfp {
1718; CHECK-LABEL: vsitofp_v8i64_v8f32:
1719; CHECK:       # %bb.0:
1720; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1721; CHECK-NEXT:    vfncvt.f.x.w v12, v8
1722; CHECK-NEXT:    vmv.v.v v8, v12
1723; CHECK-NEXT:    ret
1724  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1725  ret <8 x float> %evec
1726}
1727
1728declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
1729define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) strictfp {
1730; CHECK-LABEL: vuitofp_v8i64_v8f32:
1731; CHECK:       # %bb.0:
1732; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1733; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
1734; CHECK-NEXT:    vmv.v.v v8, v12
1735; CHECK-NEXT:    ret
1736  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1737  ret <8 x float> %evec
1738}
1739
1740declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
1741define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) strictfp {
1742; CHECK-LABEL: vsitofp_v8i64_v8f64:
1743; CHECK:       # %bb.0:
1744; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
1745; CHECK-NEXT:    vfcvt.f.x.v v8, v8
1746; CHECK-NEXT:    ret
1747  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1748  ret <8 x double> %evec
1749}
1750
1751declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
1752define <8 x double> @vuitofp_v8i64_v8f64(<8 x i64> %va) strictfp {
1753; CHECK-LABEL: vuitofp_v8i64_v8f64:
1754; CHECK:       # %bb.0:
1755; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
1756; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
1757; CHECK-NEXT:    ret
1758  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1759  ret <8 x double> %evec
1760}
1761