; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll (revision 0ebe48f068c0ca69f76ed68b621c9294acd75f76)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | \
; RUN:   FileCheck %s -check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | \
; RUN:   FileCheck %s -check-prefix=RV64

; ================================================================================
; trunc <vscale x 1 x double>
; ================================================================================

declare <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double>)

define <vscale x 1 x i8> @trunc_nxv1f64_to_si8(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI0_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI0_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI0_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI0_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

define <vscale x 1 x i8> @trunc_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI1_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI1_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

define <vscale x 1 x i16> @trunc_nxv1f64_to_si16(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI2_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI2_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI2_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI2_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

define <vscale x 1 x i16> @trunc_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI3_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI3_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

define <vscale x 1 x i32> @trunc_nxv1f64_to_si32(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

define <vscale x 1 x i32> @trunc_nxv1f64_to_ui32(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

define <vscale x 1 x i64> @trunc_nxv1f64_to_si64(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfcvt.rtz.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfcvt.rtz.x.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}

define <vscale x 1 x i64> @trunc_nxv1f64_to_ui64(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}
248
; ================================================================================
; trunc <vscale x 4 x double>
; ================================================================================

declare <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double>)

define <vscale x 4 x i8> @trunc_nxv4f64_to_si8(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI8_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI8_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI8_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI8_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

define <vscale x 4 x i8> @trunc_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI9_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI9_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI9_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI9_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

define <vscale x 4 x i16> @trunc_nxv4f64_to_si16(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI10_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI10_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

define <vscale x 4 x i16> @trunc_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI11_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI11_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

define <vscale x 4 x i32> @trunc_nxv4f64_to_si32(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

define <vscale x 4 x i32> @trunc_nxv4f64_to_ui32(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

define <vscale x 4 x i64> @trunc_nxv4f64_to_si64(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfcvt.rtz.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfcvt.rtz.x.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}

define <vscale x 4 x i64> @trunc_nxv4f64_to_ui64(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}
490
; ================================================================================
; ceil <vscale x 1 x double>
; ================================================================================

declare <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double>)

define <vscale x 1 x i8> @ceil_nxv1f64_to_si8(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI16_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI16_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

define <vscale x 1 x i8> @ceil_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI17_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI17_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

define <vscale x 1 x i16> @ceil_nxv1f64_to_si16(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI18_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI18_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

define <vscale x 1 x i16> @ceil_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI19_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI19_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

define <vscale x 1 x i32> @ceil_nxv1f64_to_si32(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.x.f.w v9, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.x.f.w v9, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

define <vscale x 1 x i32> @ceil_nxv1f64_to_ui32(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.xu.f.w v9, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.xu.f.w v9, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

define <vscale x 1 x i64> @ceil_nxv1f64_to_si64(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}

define <vscale x 1 x i64> @ceil_nxv1f64_to_ui64(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vfcvt.xu.f.v v8, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vfcvt.xu.f.v v8, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}
764
; ================================================================================
; ceil <vscale x 4 x double>
; ================================================================================

declare <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double>)

define <vscale x 4 x i8> @ceil_nxv4f64_to_si8(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI24_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI24_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

; Unsigned variant of the i8 case: same masked ceil expansion, but the first
; narrowing conversion is the unsigned vfncvt.rtz.xu.f.w.
define <vscale x 4 x i8> @ceil_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI25_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI25_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

; f64 -> i16: masked ceil expansion (frm=3), then vfncvt.rtz.x.f.w to e32 and
; one vnsrl.wi to reach e16.
define <vscale x 4 x i16> @ceil_nxv4f64_to_si16(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI26_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI26_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

; Unsigned variant of the i16 case: same masked ceil expansion with the
; unsigned vfncvt.rtz.xu.f.w narrowing.
define <vscale x 4 x i16> @ceil_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI27_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI27_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

; ceil + fptosi to i32 folds into a single narrowing vfncvt.x.f.w executed
; under static rounding mode 3 (rup).
define <vscale x 4 x i32> @ceil_nxv4f64_to_si32(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.x.f.w v12, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.x.f.w v12, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

; ceil + fptoui to i32 folds into a single narrowing vfncvt.xu.f.w executed
; under static rounding mode 3 (rup).
define <vscale x 4 x i32> @ceil_nxv4f64_to_ui32(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.xu.f.w v12, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.xu.f.w v12, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

; ceil + fptosi to i64 (same element width): single vfcvt.x.f.v under static
; rounding mode 3 (rup).
define <vscale x 4 x i64> @ceil_nxv4f64_to_si64(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}

; ceil + fptoui to i64 (same element width): single vfcvt.xu.f.v under static
; rounding mode 3 (rup).
define <vscale x 4 x i64> @ceil_nxv4f64_to_ui64(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vfcvt.xu.f.v v8, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV64-NEXT:    vfcvt.xu.f.v v8, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}

; ================================================================================
; rint <vscale x 1 x double>
; ================================================================================

declare <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double>)

; rint rounds in the current (dynamic) rounding mode, so unlike ceil the masked
; round-to-int expansion here has no fsrmi/fsrm toggle; the result is then
; truncated and narrowed down to e8 via vfncvt.rtz.x.f.w and two vnsrl.wi.
define <vscale x 1 x i8> @rint_nxv1f64_to_si8(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI32_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI32_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI32_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI32_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

; Unsigned variant: same dynamic-FRM rint expansion, with vfncvt.rtz.xu.f.w
; for the first narrowing step.
define <vscale x 1 x i8> @rint_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI33_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI33_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI33_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI33_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

; f64 -> i16: dynamic-FRM rint expansion, then one rtz narrowing convert and
; one vnsrl.wi to reach e16.
define <vscale x 1 x i16> @rint_nxv1f64_to_si16(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI34_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI34_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

; Unsigned variant of the i16 case: vfncvt.rtz.xu.f.w for the narrowing step.
define <vscale x 1 x i16> @rint_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI35_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI35_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

; rint + fptosi to i32 folds into a single vfncvt.x.f.w in the dynamic
; rounding mode (no fsrmi/fsrm needed).
define <vscale x 1 x i32> @rint_nxv1f64_to_si32(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.x.f.w v9, v8
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.x.f.w v9, v8
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

; rint + fptoui to i32 folds into a single vfncvt.xu.f.w in the dynamic
; rounding mode.
define <vscale x 1 x i32> @rint_nxv1f64_to_ui32(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.xu.f.w v9, v8
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.xu.f.w v9, v8
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

; rint + fptosi to i64 (same element width): single vfcvt.x.f.v in the dynamic
; rounding mode.
define <vscale x 1 x i64> @rint_nxv1f64_to_si64(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}

; rint + fptoui to i64 (same element width): single vfcvt.xu.f.v in the
; dynamic rounding mode.
define <vscale x 1 x i64> @rint_nxv1f64_to_ui64(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfcvt.xu.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfcvt.xu.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}

; ================================================================================
; rint <vscale x 4 x double>
; ================================================================================

declare <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double>)

; m4 version of the rint-to-i8 case: dynamic-FRM masked rint expansion, then
; vfncvt.rtz.x.f.w plus two vnsrl.wi narrowing steps down to e8.
define <vscale x 4 x i8> @rint_nxv4f64_to_si8(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI40_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI40_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

; Unsigned variant: vfncvt.rtz.xu.f.w for the first narrowing step.
define <vscale x 4 x i8> @rint_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI41_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI41_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

; f64 -> i16 at m4: dynamic-FRM rint expansion, then one rtz narrowing convert
; and one vnsrl.wi to reach e16.
define <vscale x 4 x i16> @rint_nxv4f64_to_si16(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI42_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI42_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

; Unsigned variant of the i16 case at m4: vfncvt.rtz.xu.f.w narrowing.
define <vscale x 4 x i16> @rint_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI43_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI43_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

; rint + fptosi to i32 folds into a single vfncvt.x.f.w in the dynamic
; rounding mode.
define <vscale x 4 x i32> @rint_nxv4f64_to_si32(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.x.f.w v12, v8
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.x.f.w v12, v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

; rint + fptoui to i32 folds into a single vfncvt.xu.f.w in the dynamic
; rounding mode.
define <vscale x 4 x i32> @rint_nxv4f64_to_ui32(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.xu.f.w v12, v8
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.xu.f.w v12, v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

; rint + fptosi to i64 (same element width): single vfcvt.x.f.v in the dynamic
; rounding mode.
define <vscale x 4 x i64> @rint_nxv4f64_to_si64(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}

; rint + fptoui to i64 (same element width): single vfcvt.xu.f.v in the
; dynamic rounding mode.
define <vscale x 4 x i64> @rint_nxv4f64_to_ui64(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfcvt.xu.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfcvt.xu.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}