xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
3; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
4; RUN:     --check-prefixes=CHECK,ZVFH
5; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
6; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
7; RUN:     --check-prefixes=CHECK,ZVFH
8; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
9; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
10; RUN:     --check-prefixes=CHECK,ZVFHMIN
11; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
12; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
13; RUN:     --check-prefixes=CHECK,ZVFHMIN
14
; llvm.trunc on bf16 has no native lowering: widen to f32, then round toward
; zero via a masked rtz int<->fp round-trip on lanes with |x| < 2^23
; (lui 307200 -> 0x4B000000 == 8388608.0f; larger magnitudes are already
; integral), reapply the original sign with vfsgnj, and narrow back to bf16.
15define <vscale x 1 x bfloat> @trunc_nxv1bf16(<vscale x 1 x bfloat> %x) {
16; CHECK-LABEL: trunc_nxv1bf16:
17; CHECK:       # %bb.0:
18; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
19; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
20; CHECK-NEXT:    lui a0, 307200
21; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
22; CHECK-NEXT:    vfabs.v v8, v9
23; CHECK-NEXT:    fmv.w.x fa5, a0
24; CHECK-NEXT:    vmflt.vf v0, v8, fa5
25; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
26; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
27; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
28; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
29; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
30; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
31; CHECK-NEXT:    ret
32  %a = call <vscale x 1 x bfloat> @llvm.trunc.nxv1bf16(<vscale x 1 x bfloat> %x)
33  ret <vscale x 1 x bfloat> %a
34}
35
; bf16 trunc via widen-to-f32 + masked rtz round-trip on |x| < 2^23
; (0x4B000000), vfsgnj to restore sign, narrow back; e16/mf2 <-> e32/m1.
36define <vscale x 2 x bfloat> @trunc_nxv2bf16(<vscale x 2 x bfloat> %x) {
37; CHECK-LABEL: trunc_nxv2bf16:
38; CHECK:       # %bb.0:
39; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
40; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
41; CHECK-NEXT:    lui a0, 307200
42; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
43; CHECK-NEXT:    vfabs.v v8, v9
44; CHECK-NEXT:    fmv.w.x fa5, a0
45; CHECK-NEXT:    vmflt.vf v0, v8, fa5
46; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
47; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
48; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
49; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
50; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
51; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
52; CHECK-NEXT:    ret
53  %a = call <vscale x 2 x bfloat> @llvm.trunc.nxv2bf16(<vscale x 2 x bfloat> %x)
54  ret <vscale x 2 x bfloat> %a
55}
56
; bf16 trunc via widen-to-f32 + masked rtz round-trip on |x| < 2^23
; (0x4B000000), vfsgnj to restore sign, narrow back; e16/m1 <-> e32/m2.
57define <vscale x 4 x bfloat> @trunc_nxv4bf16(<vscale x 4 x bfloat> %x) {
58; CHECK-LABEL: trunc_nxv4bf16:
59; CHECK:       # %bb.0:
60; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
61; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
62; CHECK-NEXT:    lui a0, 307200
63; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
64; CHECK-NEXT:    vfabs.v v8, v10
65; CHECK-NEXT:    fmv.w.x fa5, a0
66; CHECK-NEXT:    vmflt.vf v0, v8, fa5
67; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
68; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
69; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
70; CHECK-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
71; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
72; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
73; CHECK-NEXT:    ret
74  %a = call <vscale x 4 x bfloat> @llvm.trunc.nxv4bf16(<vscale x 4 x bfloat> %x)
75  ret <vscale x 4 x bfloat> %a
76}
77
; bf16 trunc via widen-to-f32 + masked rtz round-trip on |x| < 2^23
; (0x4B000000), vfsgnj to restore sign, narrow back; e16/m2 <-> e32/m4.
78define <vscale x 8 x bfloat> @trunc_nxv8bf16(<vscale x 8 x bfloat> %x) {
79; CHECK-LABEL: trunc_nxv8bf16:
80; CHECK:       # %bb.0:
81; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
82; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
83; CHECK-NEXT:    lui a0, 307200
84; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
85; CHECK-NEXT:    vfabs.v v8, v12
86; CHECK-NEXT:    fmv.w.x fa5, a0
87; CHECK-NEXT:    vmflt.vf v0, v8, fa5
88; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
89; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
90; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
91; CHECK-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
92; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
93; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
94; CHECK-NEXT:    ret
95  %a = call <vscale x 8 x bfloat> @llvm.trunc.nxv8bf16(<vscale x 8 x bfloat> %x)
96  ret <vscale x 8 x bfloat> %a
97}
98
; bf16 trunc via widen-to-f32 + masked rtz round-trip on |x| < 2^23
; (0x4B000000), vfsgnj to restore sign, narrow back; e16/m4 <-> e32/m8.
99define <vscale x 16 x bfloat> @trunc_nxv16bf16(<vscale x 16 x bfloat> %x) {
100; CHECK-LABEL: trunc_nxv16bf16:
101; CHECK:       # %bb.0:
102; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
103; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
104; CHECK-NEXT:    lui a0, 307200
105; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
106; CHECK-NEXT:    vfabs.v v8, v16
107; CHECK-NEXT:    fmv.w.x fa5, a0
108; CHECK-NEXT:    vmflt.vf v0, v8, fa5
109; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
110; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
111; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
112; CHECK-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
113; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
114; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
115; CHECK-NEXT:    ret
116  %a = call <vscale x 16 x bfloat> @llvm.trunc.nxv16bf16(<vscale x 16 x bfloat> %x)
117  ret <vscale x 16 x bfloat> %a
118}
119
; nxv32bf16 widened to f32 would need LMUL 16, so the lowering splits the
; m8 source into two m4 halves (v8 and v12) and runs the widen + masked rtz
; round-trip (|x| < 2^23, 0x4B000000) + vfsgnj + narrow sequence on each,
; reusing fa5 and interleaving the two halves' narrowing to free registers.
120define <vscale x 32 x bfloat> @trunc_nxv32bf16(<vscale x 32 x bfloat> %x) {
121; CHECK-LABEL: trunc_nxv32bf16:
122; CHECK:       # %bb.0:
123; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
124; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
125; CHECK-NEXT:    lui a0, 307200
126; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
127; CHECK-NEXT:    vfabs.v v24, v16
128; CHECK-NEXT:    fmv.w.x fa5, a0
129; CHECK-NEXT:    vmflt.vf v0, v24, fa5
130; CHECK-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
131; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
132; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
133; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
134; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
135; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
136; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
137; CHECK-NEXT:    vfabs.v v8, v24
138; CHECK-NEXT:    vmflt.vf v0, v8, fa5
139; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
140; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
141; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
142; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v24, v0.t
143; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
144; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
145; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
146; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
147; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
148; CHECK-NEXT:    ret
149  %a = call <vscale x 32 x bfloat> @llvm.trunc.nxv32bf16(<vscale x 32 x bfloat> %x)
150  ret <vscale x 32 x bfloat> %a
151}
152
; f16 trunc, two lowerings: ZVFH works directly at e16 with an integral
; threshold loaded from the constant pool (.LCPI6_0 — presumably 2^10, the
; f16 integral bound; confirm against the emitted pool), while ZVFHMIN must
; widen to f32 and use the 2^23 (0x4B000000) threshold before narrowing back.
153define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) {
154; ZVFH-LABEL: trunc_nxv1f16:
155; ZVFH:       # %bb.0:
156; ZVFH-NEXT:    lui a0, %hi(.LCPI6_0)
157; ZVFH-NEXT:    flh fa5, %lo(.LCPI6_0)(a0)
158; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
159; ZVFH-NEXT:    vfabs.v v9, v8
160; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
161; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
162; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
163; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
164; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
165; ZVFH-NEXT:    ret
166;
167; ZVFHMIN-LABEL: trunc_nxv1f16:
168; ZVFHMIN:       # %bb.0:
169; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
170; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
171; ZVFHMIN-NEXT:    lui a0, 307200
172; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
173; ZVFHMIN-NEXT:    vfabs.v v8, v9
174; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
175; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
176; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
177; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
178; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
179; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
180; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
181; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
182; ZVFHMIN-NEXT:    ret
183  %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x)
184  ret <vscale x 1 x half> %a
185}
186declare <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half>)
187
; f16 trunc: ZVFH direct at e16/mf2 (constant-pool threshold .LCPI7_0);
; ZVFHMIN widens to e32/m1 with the 0x4B000000 (2^23) threshold.
188define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) {
189; ZVFH-LABEL: trunc_nxv2f16:
190; ZVFH:       # %bb.0:
191; ZVFH-NEXT:    lui a0, %hi(.LCPI7_0)
192; ZVFH-NEXT:    flh fa5, %lo(.LCPI7_0)(a0)
193; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
194; ZVFH-NEXT:    vfabs.v v9, v8
195; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
196; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
197; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
198; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
199; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
200; ZVFH-NEXT:    ret
201;
202; ZVFHMIN-LABEL: trunc_nxv2f16:
203; ZVFHMIN:       # %bb.0:
204; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
205; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
206; ZVFHMIN-NEXT:    lui a0, 307200
207; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
208; ZVFHMIN-NEXT:    vfabs.v v8, v9
209; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
210; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
211; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
212; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
213; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
214; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
215; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
216; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
217; ZVFHMIN-NEXT:    ret
218  %a = call <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half> %x)
219  ret <vscale x 2 x half> %a
220}
221declare <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half>)
222
; f16 trunc: ZVFH direct at e16/m1 (constant-pool threshold .LCPI8_0);
; ZVFHMIN widens to e32/m2 with the 0x4B000000 (2^23) threshold.
223define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) {
224; ZVFH-LABEL: trunc_nxv4f16:
225; ZVFH:       # %bb.0:
226; ZVFH-NEXT:    lui a0, %hi(.LCPI8_0)
227; ZVFH-NEXT:    flh fa5, %lo(.LCPI8_0)(a0)
228; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
229; ZVFH-NEXT:    vfabs.v v9, v8
230; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
231; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
232; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
233; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
234; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
235; ZVFH-NEXT:    ret
236;
237; ZVFHMIN-LABEL: trunc_nxv4f16:
238; ZVFHMIN:       # %bb.0:
239; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
240; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
241; ZVFHMIN-NEXT:    lui a0, 307200
242; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
243; ZVFHMIN-NEXT:    vfabs.v v8, v10
244; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
245; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
246; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
247; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
248; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
249; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
250; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
251; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
252; ZVFHMIN-NEXT:    ret
253  %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x)
254  ret <vscale x 4 x half> %a
255}
256declare <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half>)
257
; f16 trunc: ZVFH direct at e16/m2 (constant-pool threshold .LCPI9_0);
; ZVFHMIN widens to e32/m4 with the 0x4B000000 (2^23) threshold.
258define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) {
259; ZVFH-LABEL: trunc_nxv8f16:
260; ZVFH:       # %bb.0:
261; ZVFH-NEXT:    lui a0, %hi(.LCPI9_0)
262; ZVFH-NEXT:    flh fa5, %lo(.LCPI9_0)(a0)
263; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
264; ZVFH-NEXT:    vfabs.v v10, v8
265; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
266; ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
267; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
268; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
269; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
270; ZVFH-NEXT:    ret
271;
272; ZVFHMIN-LABEL: trunc_nxv8f16:
273; ZVFHMIN:       # %bb.0:
274; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
275; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
276; ZVFHMIN-NEXT:    lui a0, 307200
277; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
278; ZVFHMIN-NEXT:    vfabs.v v8, v12
279; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
280; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
281; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
282; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
283; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
284; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
285; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
286; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
287; ZVFHMIN-NEXT:    ret
288  %a = call <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half> %x)
289  ret <vscale x 8 x half> %a
290}
291declare <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half>)
292
; f16 trunc: ZVFH direct at e16/m4 (constant-pool threshold .LCPI10_0);
; ZVFHMIN widens to e32/m8 with the 0x4B000000 (2^23) threshold.
293define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) {
294; ZVFH-LABEL: trunc_nxv16f16:
295; ZVFH:       # %bb.0:
296; ZVFH-NEXT:    lui a0, %hi(.LCPI10_0)
297; ZVFH-NEXT:    flh fa5, %lo(.LCPI10_0)(a0)
298; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
299; ZVFH-NEXT:    vfabs.v v12, v8
300; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
301; ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
302; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
303; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
304; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
305; ZVFH-NEXT:    ret
306;
307; ZVFHMIN-LABEL: trunc_nxv16f16:
308; ZVFHMIN:       # %bb.0:
309; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
310; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
311; ZVFHMIN-NEXT:    lui a0, 307200
312; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
313; ZVFHMIN-NEXT:    vfabs.v v8, v16
314; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
315; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
316; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
317; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
318; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
319; ZVFHMIN-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
320; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
321; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
322; ZVFHMIN-NEXT:    ret
323  %a = call <vscale x 16 x half> @llvm.trunc.nxv16f16(<vscale x 16 x half> %x)
324  ret <vscale x 16 x half> %a
325}
326declare <vscale x 16 x half> @llvm.trunc.nxv16f16(<vscale x 16 x half>)
327
; f16 trunc at full m8: ZVFH still handles it directly at e16/m8 with the
; constant-pool threshold (.LCPI11_0); ZVFHMIN cannot widen m8 to f32 in one
; go, so it splits into two m4 halves (v8, v12), each getting the widen +
; masked rtz round-trip (|x| < 2^23, 0x4B000000) + vfsgnj + narrow sequence.
328define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) {
329; ZVFH-LABEL: trunc_nxv32f16:
330; ZVFH:       # %bb.0:
331; ZVFH-NEXT:    lui a0, %hi(.LCPI11_0)
332; ZVFH-NEXT:    flh fa5, %lo(.LCPI11_0)(a0)
333; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
334; ZVFH-NEXT:    vfabs.v v16, v8
335; ZVFH-NEXT:    vmflt.vf v0, v16, fa5
336; ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
337; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
338; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
339; ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
340; ZVFH-NEXT:    ret
341;
342; ZVFHMIN-LABEL: trunc_nxv32f16:
343; ZVFHMIN:       # %bb.0:
344; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
345; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
346; ZVFHMIN-NEXT:    lui a0, 307200
347; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
348; ZVFHMIN-NEXT:    vfabs.v v24, v16
349; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
350; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
351; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
352; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
353; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
354; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
355; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
356; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
357; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
358; ZVFHMIN-NEXT:    vfabs.v v8, v24
359; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
360; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
361; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
362; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
363; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v24, v0.t
364; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
365; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
366; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
367; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
368; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
369; ZVFHMIN-NEXT:    ret
370  %a = call <vscale x 32 x half> @llvm.trunc.nxv32f16(<vscale x 32 x half> %x)
371  ret <vscale x 32 x half> %a
372}
373declare <vscale x 32 x half> @llvm.trunc.nxv32f16(<vscale x 32 x half>)
374
; f32 trunc: no widening needed — direct masked rtz round-trip at e32/mf2;
; only lanes with |x| < 2^23 (lui 307200 = 0x4B000000) are rounded, then
; vfsgnj restores the sign (preserving -0.0 and signs of rounded lanes).
375define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) {
376; CHECK-LABEL: trunc_nxv1f32:
377; CHECK:       # %bb.0:
378; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
379; CHECK-NEXT:    vfabs.v v9, v8
380; CHECK-NEXT:    lui a0, 307200
381; CHECK-NEXT:    fmv.w.x fa5, a0
382; CHECK-NEXT:    vmflt.vf v0, v9, fa5
383; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
384; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
385; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
386; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
387; CHECK-NEXT:    ret
388  %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x)
389  ret <vscale x 1 x float> %a
390}
391declare <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float>)
392
; f32 trunc: direct masked rtz round-trip at e32/m1 (|x| < 2^23 mask).
393define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) {
394; CHECK-LABEL: trunc_nxv2f32:
395; CHECK:       # %bb.0:
396; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
397; CHECK-NEXT:    vfabs.v v9, v8
398; CHECK-NEXT:    lui a0, 307200
399; CHECK-NEXT:    fmv.w.x fa5, a0
400; CHECK-NEXT:    vmflt.vf v0, v9, fa5
401; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
402; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
403; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
404; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
405; CHECK-NEXT:    ret
406  %a = call <vscale x 2 x float> @llvm.trunc.nxv2f32(<vscale x 2 x float> %x)
407  ret <vscale x 2 x float> %a
408}
409declare <vscale x 2 x float> @llvm.trunc.nxv2f32(<vscale x 2 x float>)
410
; f32 trunc: direct masked rtz round-trip at e32/m2 (|x| < 2^23 mask).
411define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) {
412; CHECK-LABEL: trunc_nxv4f32:
413; CHECK:       # %bb.0:
414; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
415; CHECK-NEXT:    vfabs.v v10, v8
416; CHECK-NEXT:    lui a0, 307200
417; CHECK-NEXT:    fmv.w.x fa5, a0
418; CHECK-NEXT:    vmflt.vf v0, v10, fa5
419; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
420; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
421; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
422; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
423; CHECK-NEXT:    ret
424  %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x)
425  ret <vscale x 4 x float> %a
426}
427declare <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float>)
428
; f32 trunc: direct masked rtz round-trip at e32/m4 (|x| < 2^23 mask).
429define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) {
430; CHECK-LABEL: trunc_nxv8f32:
431; CHECK:       # %bb.0:
432; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
433; CHECK-NEXT:    vfabs.v v12, v8
434; CHECK-NEXT:    lui a0, 307200
435; CHECK-NEXT:    fmv.w.x fa5, a0
436; CHECK-NEXT:    vmflt.vf v0, v12, fa5
437; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
438; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
439; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
440; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
441; CHECK-NEXT:    ret
442  %a = call <vscale x 8 x float> @llvm.trunc.nxv8f32(<vscale x 8 x float> %x)
443  ret <vscale x 8 x float> %a
444}
445declare <vscale x 8 x float> @llvm.trunc.nxv8f32(<vscale x 8 x float>)
446
; f32 trunc: direct masked rtz round-trip at e32/m8 (|x| < 2^23 mask).
447define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) {
448; CHECK-LABEL: trunc_nxv16f32:
449; CHECK:       # %bb.0:
450; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
451; CHECK-NEXT:    vfabs.v v16, v8
452; CHECK-NEXT:    lui a0, 307200
453; CHECK-NEXT:    fmv.w.x fa5, a0
454; CHECK-NEXT:    vmflt.vf v0, v16, fa5
455; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
456; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
457; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
458; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
459; CHECK-NEXT:    ret
460  %a = call <vscale x 16 x float> @llvm.trunc.nxv16f32(<vscale x 16 x float> %x)
461  ret <vscale x 16 x float> %a
462}
463declare <vscale x 16 x float> @llvm.trunc.nxv16f32(<vscale x 16 x float>)
464
; f64 trunc: direct masked rtz round-trip at e64/m1; the integral threshold
; comes from the constant pool (.LCPI17_0 — presumably 2^52, the f64
; integral bound; confirm against the emitted pool).
465define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) {
466; CHECK-LABEL: trunc_nxv1f64:
467; CHECK:       # %bb.0:
468; CHECK-NEXT:    lui a0, %hi(.LCPI17_0)
469; CHECK-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
470; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
471; CHECK-NEXT:    vfabs.v v9, v8
472; CHECK-NEXT:    vmflt.vf v0, v9, fa5
473; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
474; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
475; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
476; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
477; CHECK-NEXT:    ret
478  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
479  ret <vscale x 1 x double> %a
480}
481declare <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double>)
482
; f64 trunc: direct masked rtz round-trip at e64/m2; threshold from the
; constant pool (.LCPI18_0).
483define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) {
484; CHECK-LABEL: trunc_nxv2f64:
485; CHECK:       # %bb.0:
486; CHECK-NEXT:    lui a0, %hi(.LCPI18_0)
487; CHECK-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
488; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
489; CHECK-NEXT:    vfabs.v v10, v8
490; CHECK-NEXT:    vmflt.vf v0, v10, fa5
491; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
492; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
493; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
494; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
495; CHECK-NEXT:    ret
496  %a = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> %x)
497  ret <vscale x 2 x double> %a
498}
499declare <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double>)
500
; f64 trunc: direct masked rtz round-trip at e64/m4; threshold from the
; constant pool (.LCPI19_0).
501define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) {
502; CHECK-LABEL: trunc_nxv4f64:
503; CHECK:       # %bb.0:
504; CHECK-NEXT:    lui a0, %hi(.LCPI19_0)
505; CHECK-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
506; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
507; CHECK-NEXT:    vfabs.v v12, v8
508; CHECK-NEXT:    vmflt.vf v0, v12, fa5
509; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
510; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
511; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
512; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
513; CHECK-NEXT:    ret
514  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
515  ret <vscale x 4 x double> %a
516}
517declare <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double>)
518
; f64 trunc: direct masked rtz round-trip at e64/m8; threshold from the
; constant pool (.LCPI20_0).
519define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) {
520; CHECK-LABEL: trunc_nxv8f64:
521; CHECK:       # %bb.0:
522; CHECK-NEXT:    lui a0, %hi(.LCPI20_0)
523; CHECK-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
524; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
525; CHECK-NEXT:    vfabs.v v16, v8
526; CHECK-NEXT:    vmflt.vf v0, v16, fa5
527; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
528; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
529; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
530; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
531; CHECK-NEXT:    ret
532  %a = call <vscale x 8 x double> @llvm.trunc.nxv8f64(<vscale x 8 x double> %x)
533  ret <vscale x 8 x double> %a
534}
535declare <vscale x 8 x double> @llvm.trunc.nxv8f64(<vscale x 8 x double>)
536