; xref: llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

; Strict-FP (fpexcept.strict) trunc of <vscale x 1 x half> (e16, LMUL=mf4).
; Expected lowering: vmfne selects NaN lanes and a masked x+x quiets them;
; lanes whose magnitude is below the constant-pool threshold are truncated via
; a masked vfcvt.rtz.x.f.v / vfcvt.f.x.v round-trip; the masked vfsgnj.vv then
; restores the original sign, leaving out-of-range lanes untouched.
define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) strictfp {
; CHECK-LABEL: trunc_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x half> @llvm.experimental.constrained.trunc.nxv1f16(<vscale x 1 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.experimental.constrained.trunc.nxv1f16(<vscale x 1 x half>, metadata)

; Strict-FP trunc of <vscale x 2 x half> (e16, LMUL=mf2): masked x+x quiets
; NaN lanes, a masked RTZ convert round-trip truncates lanes below the
; constant-pool threshold, and vfsgnj.vv restores the sign.
define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) strictfp {
; CHECK-LABEL: trunc_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x half> @llvm.experimental.constrained.trunc.nxv2f16(<vscale x 2 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.experimental.constrained.trunc.nxv2f16(<vscale x 2 x half>, metadata)

; Strict-FP trunc of <vscale x 4 x half> (e16, LMUL=m1): masked x+x quiets
; NaN lanes, a masked RTZ convert round-trip truncates lanes below the
; constant-pool threshold, and vfsgnj.vv restores the sign.
define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) strictfp {
; CHECK-LABEL: trunc_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x half> @llvm.experimental.constrained.trunc.nxv4f16(<vscale x 4 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.experimental.constrained.trunc.nxv4f16(<vscale x 4 x half>, metadata)

; Strict-FP trunc of <vscale x 8 x half> (e16, LMUL=m2; scratch moves to v10
; to respect register-group alignment): masked x+x quiets NaN lanes, a masked
; RTZ convert round-trip truncates lanes below the constant-pool threshold,
; and vfsgnj.vv restores the sign.
define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) strictfp {
; CHECK-LABEL: trunc_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x half> @llvm.experimental.constrained.trunc.nxv8f16(<vscale x 8 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.experimental.constrained.trunc.nxv8f16(<vscale x 8 x half>, metadata)

; Strict-FP trunc of <vscale x 16 x half> (e16, LMUL=m4, scratch v12): masked
; x+x quiets NaN lanes, a masked RTZ convert round-trip truncates lanes below
; the constant-pool threshold, and vfsgnj.vv restores the sign.
define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) strictfp {
; CHECK-LABEL: trunc_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x half> @llvm.experimental.constrained.trunc.nxv16f16(<vscale x 16 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.experimental.constrained.trunc.nxv16f16(<vscale x 16 x half>, metadata)

; Strict-FP trunc of <vscale x 32 x half> (e16, LMUL=m8, scratch v16): masked
; x+x quiets NaN lanes, a masked RTZ convert round-trip truncates lanes below
; the constant-pool threshold, and vfsgnj.vv restores the sign.
define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) strictfp {
; CHECK-LABEL: trunc_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 32 x half> @llvm.experimental.constrained.trunc.nxv32f16(<vscale x 32 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %a
}
declare <vscale x 32 x half> @llvm.experimental.constrained.trunc.nxv32f16(<vscale x 32 x half>, metadata)

; Strict-FP trunc of <vscale x 1 x float> (e32, LMUL=mf2). Unlike the f16/f64
; cases, the magnitude threshold is materialized inline: lui a0, 307200 forms
; 0x4b000000 (float 2^23, above which every f32 is already integral).
define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) strictfp {
; CHECK-LABEL: trunc_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x float> @llvm.experimental.constrained.trunc.nxv1f32(<vscale x 1 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.experimental.constrained.trunc.nxv1f32(<vscale x 1 x float>, metadata)

; Strict-FP trunc of <vscale x 2 x float> (e32, LMUL=m1): masked x+x quiets
; NaN lanes, lanes below 2^23 (lui a0, 307200 -> 0x4b000000) are truncated by
; a masked RTZ convert round-trip, and vfsgnj.vv restores the sign.
define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) strictfp {
; CHECK-LABEL: trunc_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x float> @llvm.experimental.constrained.trunc.nxv2f32(<vscale x 2 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.experimental.constrained.trunc.nxv2f32(<vscale x 2 x float>, metadata)

; Strict-FP trunc of <vscale x 4 x float> (e32, LMUL=m2, scratch v10): masked
; x+x quiets NaN lanes, lanes below 2^23 (0x4b000000) are truncated by a
; masked RTZ convert round-trip, and vfsgnj.vv restores the sign.
define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) strictfp {
; CHECK-LABEL: trunc_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x float> @llvm.experimental.constrained.trunc.nxv4f32(<vscale x 4 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.experimental.constrained.trunc.nxv4f32(<vscale x 4 x float>, metadata)

; Strict-FP trunc of <vscale x 8 x float> (e32, LMUL=m4, scratch v12): masked
; x+x quiets NaN lanes, lanes below 2^23 (0x4b000000) are truncated by a
; masked RTZ convert round-trip, and vfsgnj.vv restores the sign.
define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) strictfp {
; CHECK-LABEL: trunc_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x float> @llvm.experimental.constrained.trunc.nxv8f32(<vscale x 8 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.experimental.constrained.trunc.nxv8f32(<vscale x 8 x float>, metadata)

; Strict-FP trunc of <vscale x 16 x float> (e32, LMUL=m8, scratch v16): masked
; x+x quiets NaN lanes, lanes below 2^23 (0x4b000000) are truncated by a
; masked RTZ convert round-trip, and vfsgnj.vv restores the sign.
define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) strictfp {
; CHECK-LABEL: trunc_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x float> @llvm.experimental.constrained.trunc.nxv16f32(<vscale x 16 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.experimental.constrained.trunc.nxv16f32(<vscale x 16 x float>, metadata)

; Strict-FP trunc of <vscale x 1 x double> (e64, LMUL=m1). Threshold is loaded
; from the constant pool with fld (f64 constants cannot be built with a single
; lui): masked x+x quiets NaN lanes, a masked RTZ convert round-trip truncates
; in-range lanes, and vfsgnj.vv restores the sign.
define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) strictfp {
; CHECK-LABEL: trunc_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double>, metadata)

; Strict-FP trunc of <vscale x 2 x double> (e64, LMUL=m2, scratch v10): masked
; x+x quiets NaN lanes, a masked RTZ convert round-trip truncates lanes below
; the constant-pool threshold, and vfsgnj.vv restores the sign.
define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) strictfp {
; CHECK-LABEL: trunc_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double>, metadata)

; Strict-FP trunc of <vscale x 4 x double> (e64, LMUL=m4, scratch v12): masked
; x+x quiets NaN lanes, a masked RTZ convert round-trip truncates lanes below
; the constant-pool threshold, and vfsgnj.vv restores the sign.
define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) strictfp {
; CHECK-LABEL: trunc_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double>, metadata)

; Strict-FP trunc of <vscale x 8 x double> (e64, LMUL=m8, scratch v16): masked
; x+x quiets NaN lanes, a masked RTZ convert round-trip truncates lanes below
; the constant-pool threshold, and vfsgnj.vv restores the sign.
define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) strictfp {
; CHECK-LABEL: trunc_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x double> @llvm.experimental.constrained.trunc.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.experimental.constrained.trunc.nxv8f64(<vscale x 8 x double>, metadata)
