xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll (revision b48e5f0ff3f25e8bdd3ae473dca00511336cbd6f)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
3; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
4; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
5; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
6
; ---------------------------------------------------------------------
; vp.uitofp producing <vscale x 2 x bfloat>.  The CHECK lines show the
; expected lowering: the integer source is converted to f32
; (vfwcvt.f.xu.v / vfcvt.f.xu.v / vfncvt.f.xu.w depending on source
; width) and the result is then narrowed with vfncvtbf16.f.f.w.
; NOTE(review): all CHECK lines in this file are autogenerated by
; update_llc_test_checks.py -- regenerate them, do not hand-edit.
; ---------------------------------------------------------------------
; i7 is an illegal element type: the expected code first masks the
; promoted value with 127 (li a1, 127 / vand.vx) so only the low 7 bits
; take part in the unsigned conversion.
7define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
8; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i7:
9; CHECK:       # %bb.0:
10; CHECK-NEXT:    li a1, 127
11; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
12; CHECK-NEXT:    vand.vx v8, v8, a1
13; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
14; CHECK-NEXT:    vzext.vf2 v9, v8, v0.t
15; CHECK-NEXT:    vfwcvt.f.xu.v v10, v9, v0.t
16; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
17; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
18; CHECK-NEXT:    ret
19  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 %evl)
20  ret <vscale x 2 x bfloat> %v
21}
22
; i8 source: zero-extended to i16 (vzext.vf2) before the widening
; unsigned convert; masked and unmasked variants differ only in v0.t.
23define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
24; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i8:
25; CHECK:       # %bb.0:
26; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
27; CHECK-NEXT:    vzext.vf2 v9, v8, v0.t
28; CHECK-NEXT:    vfwcvt.f.xu.v v10, v9, v0.t
29; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
30; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
31; CHECK-NEXT:    ret
32  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
33  ret <vscale x 2 x bfloat> %v
34}
35
36define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
37; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i8_unmasked:
38; CHECK:       # %bb.0:
39; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
40; CHECK-NEXT:    vzext.vf2 v9, v8
41; CHECK-NEXT:    vfwcvt.f.xu.v v10, v9
42; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
43; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
44; CHECK-NEXT:    ret
45  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
46  ret <vscale x 2 x bfloat> %v
47}
48
; i16 source: one widening convert to f32, then narrow to bf16.
49define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
50; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i16:
51; CHECK:       # %bb.0:
52; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
53; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8, v0.t
54; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
55; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
56; CHECK-NEXT:    ret
57  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
58  ret <vscale x 2 x bfloat> %v
59}
60
61define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
62; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i16_unmasked:
63; CHECK:       # %bb.0:
64; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
65; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
66; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
67; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
68; CHECK-NEXT:    ret
69  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
70  ret <vscale x 2 x bfloat> %v
71}
72
; i32 source: same-width convert to f32 (vfcvt.f.xu.v), then narrow.
73define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
74; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i32:
75; CHECK:       # %bb.0:
76; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
77; CHECK-NEXT:    vfcvt.f.xu.v v9, v8, v0.t
78; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
79; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
80; CHECK-NEXT:    ret
81  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
82  ret <vscale x 2 x bfloat> %v
83}
84
85define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
86; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i32_unmasked:
87; CHECK:       # %bb.0:
88; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
89; CHECK-NEXT:    vfcvt.f.xu.v v9, v8
90; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
91; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
92; CHECK-NEXT:    ret
93  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
94  ret <vscale x 2 x bfloat> %v
95}
96
; i64 source: narrowing convert i64->f32 (vfncvt.f.xu.w), then narrow
; again f32->bf16.
97define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
98; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i64:
99; CHECK:       # %bb.0:
100; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
101; CHECK-NEXT:    vfncvt.f.xu.w v10, v8, v0.t
102; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
103; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
104; CHECK-NEXT:    ret
105  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
106  ret <vscale x 2 x bfloat> %v
107}
108
109define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
110; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i64_unmasked:
111; CHECK:       # %bb.0:
112; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
113; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
114; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
115; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
116; CHECK-NEXT:    ret
117  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
118  ret <vscale x 2 x bfloat> %v
119}
120
; ---------------------------------------------------------------------
; vp.uitofp producing <vscale x 2 x half>.  These tests exercise both
; run configurations: with +zvfh a direct fp16 convert is expected
; (vfcvt/vfwcvt/vfncvt at e16), while with +zvfhmin the expected code
; goes through f32 and finishes with vfncvt.f.f.w.
; NOTE(review): CHECK lines are autogenerated by
; update_llc_test_checks.py -- regenerate, do not hand-edit.
; ---------------------------------------------------------------------
121declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i7(<vscale x 2 x i7>, <vscale x 2 x i1>, i32)
122
; Illegal i7 source: both configurations mask with 127 first so only
; the low 7 bits feed the unsigned conversion.
123define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
124; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i7:
125; ZVFH:       # %bb.0:
126; ZVFH-NEXT:    li a1, 127
127; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
128; ZVFH-NEXT:    vand.vx v9, v8, a1
129; ZVFH-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
130; ZVFH-NEXT:    ret
131;
132; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i7:
133; ZVFHMIN:       # %bb.0:
134; ZVFHMIN-NEXT:    li a1, 127
135; ZVFHMIN-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
136; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
137; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
138; ZVFHMIN-NEXT:    vzext.vf2 v9, v8, v0.t
139; ZVFHMIN-NEXT:    vfwcvt.f.xu.v v10, v9, v0.t
140; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
141; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
142; ZVFHMIN-NEXT:    ret
143  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 %evl)
144  ret <vscale x 2 x half> %v
145}
146
147declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
148
149define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
150; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i8:
151; ZVFH:       # %bb.0:
152; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
153; ZVFH-NEXT:    vfwcvt.f.xu.v v9, v8, v0.t
154; ZVFH-NEXT:    vmv1r.v v8, v9
155; ZVFH-NEXT:    ret
156;
157; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i8:
158; ZVFHMIN:       # %bb.0:
159; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
160; ZVFHMIN-NEXT:    vzext.vf2 v9, v8, v0.t
161; ZVFHMIN-NEXT:    vfwcvt.f.xu.v v10, v9, v0.t
162; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
163; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
164; ZVFHMIN-NEXT:    ret
165  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
166  ret <vscale x 2 x half> %v
167}
168
169define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
170; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i8_unmasked:
171; ZVFH:       # %bb.0:
172; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
173; ZVFH-NEXT:    vfwcvt.f.xu.v v9, v8
174; ZVFH-NEXT:    vmv1r.v v8, v9
175; ZVFH-NEXT:    ret
176;
177; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i8_unmasked:
178; ZVFHMIN:       # %bb.0:
179; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
180; ZVFHMIN-NEXT:    vzext.vf2 v9, v8
181; ZVFHMIN-NEXT:    vfwcvt.f.xu.v v10, v9
182; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
183; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
184; ZVFHMIN-NEXT:    ret
185  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
186  ret <vscale x 2 x half> %v
187}
188
189declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
190
191define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
192; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i16:
193; ZVFH:       # %bb.0:
194; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
195; ZVFH-NEXT:    vfcvt.f.xu.v v8, v8, v0.t
196; ZVFH-NEXT:    ret
197;
198; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i16:
199; ZVFHMIN:       # %bb.0:
200; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
201; ZVFHMIN-NEXT:    vfwcvt.f.xu.v v9, v8, v0.t
202; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
203; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
204; ZVFHMIN-NEXT:    ret
205  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
206  ret <vscale x 2 x half> %v
207}
208
209define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
210; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i16_unmasked:
211; ZVFH:       # %bb.0:
212; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
213; ZVFH-NEXT:    vfcvt.f.xu.v v8, v8
214; ZVFH-NEXT:    ret
215;
216; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i16_unmasked:
217; ZVFHMIN:       # %bb.0:
218; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
219; ZVFHMIN-NEXT:    vfwcvt.f.xu.v v9, v8
220; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
221; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
222; ZVFHMIN-NEXT:    ret
223  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
224  ret <vscale x 2 x half> %v
225}
226
227declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
228
229define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
230; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i32:
231; ZVFH:       # %bb.0:
232; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
233; ZVFH-NEXT:    vfncvt.f.xu.w v9, v8, v0.t
234; ZVFH-NEXT:    vmv1r.v v8, v9
235; ZVFH-NEXT:    ret
236;
237; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i32:
238; ZVFHMIN:       # %bb.0:
239; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
240; ZVFHMIN-NEXT:    vfcvt.f.xu.v v9, v8, v0.t
241; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
242; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
243; ZVFHMIN-NEXT:    ret
244  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
245  ret <vscale x 2 x half> %v
246}
247
248define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
249; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i32_unmasked:
250; ZVFH:       # %bb.0:
251; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
252; ZVFH-NEXT:    vfncvt.f.xu.w v9, v8
253; ZVFH-NEXT:    vmv1r.v v8, v9
254; ZVFH-NEXT:    ret
255;
256; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i32_unmasked:
257; ZVFHMIN:       # %bb.0:
258; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
259; ZVFHMIN-NEXT:    vfcvt.f.xu.v v9, v8
260; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
261; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
262; ZVFHMIN-NEXT:    ret
263  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
264  ret <vscale x 2 x half> %v
265}
266
267declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
268
; i64 -> f16 needs two narrowing steps in both configurations:
; i64 -> f32 (vfncvt.f.xu.w) then f32 -> f16 (vfncvt.f.f.w).
269define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
270; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i64:
271; ZVFH:       # %bb.0:
272; ZVFH-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
273; ZVFH-NEXT:    vfncvt.f.xu.w v10, v8, v0.t
274; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
275; ZVFH-NEXT:    vfncvt.f.f.w v8, v10, v0.t
276; ZVFH-NEXT:    ret
277;
278; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i64:
279; ZVFHMIN:       # %bb.0:
280; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
281; ZVFHMIN-NEXT:    vfncvt.f.xu.w v10, v8, v0.t
282; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
283; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
284; ZVFHMIN-NEXT:    ret
285  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
286  ret <vscale x 2 x half> %v
287}
288
289define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
290; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i64_unmasked:
291; ZVFH:       # %bb.0:
292; ZVFH-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
293; ZVFH-NEXT:    vfncvt.f.xu.w v10, v8
294; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
295; ZVFH-NEXT:    vfncvt.f.f.w v8, v10
296; ZVFH-NEXT:    ret
297;
298; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i64_unmasked:
299; ZVFHMIN:       # %bb.0:
300; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
301; ZVFHMIN-NEXT:    vfncvt.f.xu.w v10, v8
302; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
303; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
304; ZVFHMIN-NEXT:    ret
305  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
306  ret <vscale x 2 x half> %v
307}
308
; ---------------------------------------------------------------------
; vp.uitofp producing <vscale x 2 x float>.  f32 is independent of the
; fp16 configuration, so these use the common CHECK prefix: i8 is
; zero-extended then widened, i16 is widened (vfwcvt), i32 converts
; in place (vfcvt), and i64 narrows (vfncvt).
; NOTE(review): CHECK lines are autogenerated by
; update_llc_test_checks.py -- regenerate, do not hand-edit.
; ---------------------------------------------------------------------
309declare <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
310
311define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
312; CHECK-LABEL: vuitofp_nxv2f32_nxv2i8:
313; CHECK:       # %bb.0:
314; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
315; CHECK-NEXT:    vzext.vf2 v9, v8, v0.t
316; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
317; CHECK-NEXT:    ret
318  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
319  ret <vscale x 2 x float> %v
320}
321
322define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
323; CHECK-LABEL: vuitofp_nxv2f32_nxv2i8_unmasked:
324; CHECK:       # %bb.0:
325; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
326; CHECK-NEXT:    vzext.vf2 v9, v8
327; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
328; CHECK-NEXT:    ret
329  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
330  ret <vscale x 2 x float> %v
331}
332
333declare <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
334
335define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
336; CHECK-LABEL: vuitofp_nxv2f32_nxv2i16:
337; CHECK:       # %bb.0:
338; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
339; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8, v0.t
340; CHECK-NEXT:    vmv1r.v v8, v9
341; CHECK-NEXT:    ret
342  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
343  ret <vscale x 2 x float> %v
344}
345
346define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
347; CHECK-LABEL: vuitofp_nxv2f32_nxv2i16_unmasked:
348; CHECK:       # %bb.0:
349; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
350; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
351; CHECK-NEXT:    vmv1r.v v8, v9
352; CHECK-NEXT:    ret
353  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
354  ret <vscale x 2 x float> %v
355}
356
357declare <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
358
359define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
360; CHECK-LABEL: vuitofp_nxv2f32_nxv2i32:
361; CHECK:       # %bb.0:
362; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
363; CHECK-NEXT:    vfcvt.f.xu.v v8, v8, v0.t
364; CHECK-NEXT:    ret
365  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
366  ret <vscale x 2 x float> %v
367}
368
369define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
370; CHECK-LABEL: vuitofp_nxv2f32_nxv2i32_unmasked:
371; CHECK:       # %bb.0:
372; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
373; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
374; CHECK-NEXT:    ret
375  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
376  ret <vscale x 2 x float> %v
377}
378
379declare <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
380
381define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
382; CHECK-LABEL: vuitofp_nxv2f32_nxv2i64:
383; CHECK:       # %bb.0:
384; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
385; CHECK-NEXT:    vfncvt.f.xu.w v10, v8, v0.t
386; CHECK-NEXT:    vmv.v.v v8, v10
387; CHECK-NEXT:    ret
388  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
389  ret <vscale x 2 x float> %v
390}
391
392define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
393; CHECK-LABEL: vuitofp_nxv2f32_nxv2i64_unmasked:
394; CHECK:       # %bb.0:
395; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
396; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
397; CHECK-NEXT:    vmv.v.v v8, v10
398; CHECK-NEXT:    ret
399  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
400  ret <vscale x 2 x float> %v
401}
402
; ---------------------------------------------------------------------
; vp.uitofp producing <vscale x 2 x double>.  i8/i16 sources are
; zero-extended to i32 (vzext.vf4 / vzext.vf2) before a widening
; convert; i32 widens directly (vfwcvt) and i64 converts in place
; (vfcvt at e64, m2).
; NOTE(review): CHECK lines are autogenerated by
; update_llc_test_checks.py -- regenerate, do not hand-edit.
; ---------------------------------------------------------------------
403declare <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
404
405define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
406; CHECK-LABEL: vuitofp_nxv2f64_nxv2i8:
407; CHECK:       # %bb.0:
408; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
409; CHECK-NEXT:    vzext.vf4 v10, v8, v0.t
410; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
411; CHECK-NEXT:    ret
412  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
413  ret <vscale x 2 x double> %v
414}
415
416define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
417; CHECK-LABEL: vuitofp_nxv2f64_nxv2i8_unmasked:
418; CHECK:       # %bb.0:
419; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
420; CHECK-NEXT:    vzext.vf4 v10, v8
421; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
422; CHECK-NEXT:    ret
423  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
424  ret <vscale x 2 x double> %v
425}
426
427declare <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
428
429define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
430; CHECK-LABEL: vuitofp_nxv2f64_nxv2i16:
431; CHECK:       # %bb.0:
432; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
433; CHECK-NEXT:    vzext.vf2 v10, v8, v0.t
434; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
435; CHECK-NEXT:    ret
436  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
437  ret <vscale x 2 x double> %v
438}
439
440define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
441; CHECK-LABEL: vuitofp_nxv2f64_nxv2i16_unmasked:
442; CHECK:       # %bb.0:
443; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
444; CHECK-NEXT:    vzext.vf2 v10, v8
445; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
446; CHECK-NEXT:    ret
447  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
448  ret <vscale x 2 x double> %v
449}
450
451declare <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
452
453define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
454; CHECK-LABEL: vuitofp_nxv2f64_nxv2i32:
455; CHECK:       # %bb.0:
456; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
457; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8, v0.t
458; CHECK-NEXT:    vmv2r.v v8, v10
459; CHECK-NEXT:    ret
460  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
461  ret <vscale x 2 x double> %v
462}
463
464define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
465; CHECK-LABEL: vuitofp_nxv2f64_nxv2i32_unmasked:
466; CHECK:       # %bb.0:
467; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
468; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
469; CHECK-NEXT:    vmv2r.v v8, v10
470; CHECK-NEXT:    ret
471  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
472  ret <vscale x 2 x double> %v
473}
474
475declare <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
476
477define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
478; CHECK-LABEL: vuitofp_nxv2f64_nxv2i64:
479; CHECK:       # %bb.0:
480; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
481; CHECK-NEXT:    vfcvt.f.xu.v v8, v8, v0.t
482; CHECK-NEXT:    ret
483  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
484  ret <vscale x 2 x double> %v
485}
486
487define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
488; CHECK-LABEL: vuitofp_nxv2f64_nxv2i64_unmasked:
489; CHECK:       # %bb.0:
490; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
491; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
492; CHECK-NEXT:    ret
493  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
494  ret <vscale x 2 x double> %v
495}
496
; ---------------------------------------------------------------------
; Wide (nxv32) cases that require splitting: the expected code computes
; the tail EVL with sub/sltu/addi/and, slides the mask down with
; vslidedown.vx to obtain the mask for the high half, converts the high
; half first, then clamps EVL (bltu/mv) and converts the low half with
; the saved original mask.
; NOTE(review): CHECK lines are autogenerated by
; update_llc_test_checks.py -- regenerate, do not hand-edit.
; ---------------------------------------------------------------------
497declare <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)
498
499define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
500; ZVFH-LABEL: vuitofp_nxv32f16_nxv32i32:
501; ZVFH:       # %bb.0:
502; ZVFH-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
503; ZVFH-NEXT:    vmv1r.v v24, v0
504; ZVFH-NEXT:    csrr a1, vlenb
505; ZVFH-NEXT:    srli a2, a1, 2
506; ZVFH-NEXT:    slli a1, a1, 1
507; ZVFH-NEXT:    vslidedown.vx v0, v0, a2
508; ZVFH-NEXT:    sub a2, a0, a1
509; ZVFH-NEXT:    sltu a3, a0, a2
510; ZVFH-NEXT:    addi a3, a3, -1
511; ZVFH-NEXT:    and a2, a3, a2
512; ZVFH-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
513; ZVFH-NEXT:    vfncvt.f.xu.w v28, v16, v0.t
514; ZVFH-NEXT:    bltu a0, a1, .LBB34_2
515; ZVFH-NEXT:  # %bb.1:
516; ZVFH-NEXT:    mv a0, a1
517; ZVFH-NEXT:  .LBB34_2:
518; ZVFH-NEXT:    vmv1r.v v0, v24
519; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
520; ZVFH-NEXT:    vfncvt.f.xu.w v24, v8, v0.t
521; ZVFH-NEXT:    vmv8r.v v8, v24
522; ZVFH-NEXT:    ret
523;
524; ZVFHMIN-LABEL: vuitofp_nxv32f16_nxv32i32:
525; ZVFHMIN:       # %bb.0:
526; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
527; ZVFHMIN-NEXT:    vmv1r.v v7, v0
528; ZVFHMIN-NEXT:    csrr a1, vlenb
529; ZVFHMIN-NEXT:    srli a2, a1, 2
530; ZVFHMIN-NEXT:    slli a1, a1, 1
531; ZVFHMIN-NEXT:    vslidedown.vx v0, v0, a2
532; ZVFHMIN-NEXT:    sub a2, a0, a1
533; ZVFHMIN-NEXT:    sltu a3, a0, a2
534; ZVFHMIN-NEXT:    addi a3, a3, -1
535; ZVFHMIN-NEXT:    and a2, a3, a2
536; ZVFHMIN-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
537; ZVFHMIN-NEXT:    vfcvt.f.xu.v v24, v16, v0.t
538; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
539; ZVFHMIN-NEXT:    vfncvt.f.f.w v20, v24
540; ZVFHMIN-NEXT:    bltu a0, a1, .LBB34_2
541; ZVFHMIN-NEXT:  # %bb.1:
542; ZVFHMIN-NEXT:    mv a0, a1
543; ZVFHMIN-NEXT:  .LBB34_2:
544; ZVFHMIN-NEXT:    vmv1r.v v0, v7
545; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
546; ZVFHMIN-NEXT:    vfcvt.f.xu.v v8, v8, v0.t
547; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
548; ZVFHMIN-NEXT:    vfncvt.f.f.w v16, v8
549; ZVFHMIN-NEXT:    vmv8r.v v8, v16
550; ZVFHMIN-NEXT:    ret
551  %v = call <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
552  ret <vscale x 32 x half> %v
553}
554
555declare <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)
556
557define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
558; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32:
559; CHECK:       # %bb.0:
560; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
561; CHECK-NEXT:    vmv1r.v v24, v0
562; CHECK-NEXT:    csrr a1, vlenb
563; CHECK-NEXT:    srli a2, a1, 2
564; CHECK-NEXT:    slli a1, a1, 1
565; CHECK-NEXT:    vslidedown.vx v0, v0, a2
566; CHECK-NEXT:    sub a2, a0, a1
567; CHECK-NEXT:    sltu a3, a0, a2
568; CHECK-NEXT:    addi a3, a3, -1
569; CHECK-NEXT:    and a2, a3, a2
570; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
571; CHECK-NEXT:    vfcvt.f.xu.v v16, v16, v0.t
572; CHECK-NEXT:    bltu a0, a1, .LBB35_2
573; CHECK-NEXT:  # %bb.1:
574; CHECK-NEXT:    mv a0, a1
575; CHECK-NEXT:  .LBB35_2:
576; CHECK-NEXT:    vmv1r.v v0, v24
577; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
578; CHECK-NEXT:    vfcvt.f.xu.v v8, v8, v0.t
579; CHECK-NEXT:    ret
580  %v = call <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
581  ret <vscale x 32 x float> %v
582}
583
; Unmasked split case: no mask bookkeeping (no vmv1r/vslidedown), only
; the EVL clamp for the two halves remains.
584define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 zeroext %evl) {
585; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32_unmasked:
586; CHECK:       # %bb.0:
587; CHECK-NEXT:    csrr a1, vlenb
588; CHECK-NEXT:    slli a1, a1, 1
589; CHECK-NEXT:    sub a2, a0, a1
590; CHECK-NEXT:    sltu a3, a0, a2
591; CHECK-NEXT:    addi a3, a3, -1
592; CHECK-NEXT:    and a2, a3, a2
593; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
594; CHECK-NEXT:    vfcvt.f.xu.v v16, v16
595; CHECK-NEXT:    bltu a0, a1, .LBB36_2
596; CHECK-NEXT:  # %bb.1:
597; CHECK-NEXT:    mv a0, a1
598; CHECK-NEXT:  .LBB36_2:
599; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
600; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
601; CHECK-NEXT:    ret
602  %v = call <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
603  ret <vscale x 32 x float> %v
604}
605