xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=ilp32d \
3; RUN:     -verify-machineinstrs < %s | FileCheck %s
4; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=lp64d \
5; RUN:     -verify-machineinstrs < %s | FileCheck %s
6; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=ilp32d \
7; RUN:     -verify-machineinstrs < %s | FileCheck %s
8; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=lp64d \
9; RUN:     -verify-machineinstrs < %s | FileCheck %s
10
; Strict (constrained) fptrunc nxv1f64 -> nxv1f32: a single narrowing step,
; so it lowers to one vfncvt.f.f.w (dynamic rounding, strict FP exceptions).
declare <vscale x 1 x float> @llvm.experimental.constrained.fptrunc.nxv1f32.nxv1f64(<vscale x 1 x double>, metadata, metadata)
define <vscale x 1 x float> @vfptrunc_nxv1f64_nxv1f32(<vscale x 1 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.fptrunc.nxv1f32.nxv1f64(<vscale x 1 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}
22
; Strict fptrunc nxv1f64 -> nxv1f16: double->half narrows twice; the f64->f32
; step uses vfncvt.rod.f.f.w (round-to-odd) so the final f32->f16 rounding
; still observes any inexactness from the first step.
declare <vscale x 1 x half> @llvm.experimental.constrained.fptrunc.nxv1f16.nxv1f64(<vscale x 1 x double>, metadata, metadata)
define <vscale x 1 x half> @vfptrunc_nxv1f64_nxv1f16(<vscale x 1 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.fptrunc.nxv1f16.nxv1f64(<vscale x 1 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}
35
; Strict fptrunc nxv1f32 -> nxv1f16: one narrowing step, a single
; vfncvt.f.f.w at e16/mf4.
declare <vscale x 1 x half> @llvm.experimental.constrained.fptrunc.nxv1f16.nxv1f32(<vscale x 1 x float>, metadata, metadata)
define <vscale x 1 x half> @vfptrunc_nxv1f32_nxv1f16(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv1f32_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.fptrunc.nxv1f16.nxv1f32(<vscale x 1 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}
47
; Strict fptrunc nxv2f64 -> nxv2f32: one narrowing step, a single
; vfncvt.f.f.w at e32/m1.
declare <vscale x 2 x float> @llvm.experimental.constrained.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double>, metadata, metadata)
define <vscale x 2 x float> @vfptrunc_nxv2f64_nxv2f32(<vscale x 2 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}
59
; Strict fptrunc nxv2f64 -> nxv2f16: double->half narrows twice; the f64->f32
; step uses vfncvt.rod.f.f.w (round-to-odd) so the final f32->f16 rounding
; still observes any inexactness from the first step.
declare <vscale x 2 x half> @llvm.experimental.constrained.fptrunc.nxv2f16.nxv2f64(<vscale x 2 x double>, metadata, metadata)
define <vscale x 2 x half> @vfptrunc_nxv2f64_nxv2f16(<vscale x 2 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.fptrunc.nxv2f16.nxv2f64(<vscale x 2 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}
72
; Strict fptrunc nxv2f32 -> nxv2f16: one narrowing step, a single
; vfncvt.f.f.w at e16/mf2.
declare <vscale x 2 x half> @llvm.experimental.constrained.fptrunc.nxv2f16.nxv2f32(<vscale x 2 x float>, metadata, metadata)
define <vscale x 2 x half> @vfptrunc_nxv2f32_nxv2f16(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.fptrunc.nxv2f16.nxv2f32(<vscale x 2 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}
84
; Strict fptrunc nxv4f64 -> nxv4f32: one narrowing step, a single
; vfncvt.f.f.w at e32/m2.
declare <vscale x 4 x float> @llvm.experimental.constrained.fptrunc.nxv4f32.nxv4f64(<vscale x 4 x double>, metadata, metadata)
define <vscale x 4 x float> @vfptrunc_nxv4f64_nxv4f32(<vscale x 4 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.fptrunc.nxv4f32.nxv4f64(<vscale x 4 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}
96
; Strict fptrunc nxv4f64 -> nxv4f16: double->half narrows twice; the f64->f32
; step uses vfncvt.rod.f.f.w (round-to-odd) so the final f32->f16 rounding
; still observes any inexactness from the first step.
declare <vscale x 4 x half> @llvm.experimental.constrained.fptrunc.nxv4f16.nxv4f64(<vscale x 4 x double>, metadata, metadata)
define <vscale x 4 x half> @vfptrunc_nxv4f64_nxv4f16(<vscale x 4 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.fptrunc.nxv4f16.nxv4f64(<vscale x 4 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}
109
; Strict fptrunc nxv4f32 -> nxv4f16: one narrowing step, a single
; vfncvt.f.f.w at e16/m1.
declare <vscale x 4 x half> @llvm.experimental.constrained.fptrunc.nxv4f16.nxv4f32(<vscale x 4 x float>, metadata, metadata)
define <vscale x 4 x half> @vfptrunc_nxv4f32_nxv4f16(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv4f32_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.fptrunc.nxv4f16.nxv4f32(<vscale x 4 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}
121
; Strict fptrunc nxv8f64 -> nxv8f32: one narrowing step, a single
; vfncvt.f.f.w at e32/m4.
declare <vscale x 8 x float> @llvm.experimental.constrained.fptrunc.nxv8f32.nxv8f64(<vscale x 8 x double>, metadata, metadata)
define <vscale x 8 x float> @vfptrunc_nxv8f64_nxv8f32(<vscale x 8 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.fptrunc.nxv8f32.nxv8f64(<vscale x 8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}
133
; Strict fptrunc nxv8f64 -> nxv8f16: double->half narrows twice; the f64->f32
; step uses vfncvt.rod.f.f.w (round-to-odd) so the final f32->f16 rounding
; still observes any inexactness from the first step.
declare <vscale x 8 x half> @llvm.experimental.constrained.fptrunc.nxv8f16.nxv8f64(<vscale x 8 x double>, metadata, metadata)
define <vscale x 8 x half> @vfptrunc_nxv8f64_nxv8f16(<vscale x 8 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.fptrunc.nxv8f16.nxv8f64(<vscale x 8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}
146
; Strict fptrunc nxv8f32 -> nxv8f16: one narrowing step, a single
; vfncvt.f.f.w at e16/m2.
declare <vscale x 8 x half> @llvm.experimental.constrained.fptrunc.nxv8f16.nxv8f32(<vscale x 8 x float>, metadata, metadata)
define <vscale x 8 x half> @vfptrunc_nxv8f32_nxv8f16(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv8f32_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.fptrunc.nxv8f16.nxv8f32(<vscale x 8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}
158
; Strict fptrunc nxv1f64 -> nxv1bf16: f64->f32 via vfncvt.rod.f.f.w
; (round-to-odd, preserving inexactness for the final rounding), then
; f32->bf16 via vfncvtbf16.f.f.w from Zvfbfmin.
declare <vscale x 1 x bfloat> @llvm.experimental.constrained.fptrunc.nxv1bf16.nxv1f64(<vscale x 1 x double>, metadata, metadata)
define <vscale x 1 x bfloat> @vfptrunc_nxv1f64_nxv1bf16(<vscale x 1 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv1f64_nxv1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x bfloat> @llvm.experimental.constrained.fptrunc.nxv1bf16.nxv1f64(<vscale x 1 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x bfloat> %evec
}
171
; Strict fptrunc nxv1f32 -> nxv1bf16: one narrowing step, a single
; vfncvtbf16.f.f.w (Zvfbfmin) at e16/mf4.
declare <vscale x 1 x bfloat> @llvm.experimental.constrained.fptrunc.nxv1bf16.nxv1f32(<vscale x 1 x float>, metadata, metadata)
define <vscale x 1 x bfloat> @vfptrunc_nxv1f32_nxv1bf16(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv1f32_nxv1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x bfloat> @llvm.experimental.constrained.fptrunc.nxv1bf16.nxv1f32(<vscale x 1 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x bfloat> %evec
}
183
; Strict fptrunc nxv2f64 -> nxv2bf16: f64->f32 via vfncvt.rod.f.f.w
; (round-to-odd, preserving inexactness for the final rounding), then
; f32->bf16 via vfncvtbf16.f.f.w from Zvfbfmin.
declare <vscale x 2 x bfloat> @llvm.experimental.constrained.fptrunc.nxv2bf16.nxv2f64(<vscale x 2 x double>, metadata, metadata)
define <vscale x 2 x bfloat> @vfptrunc_nxv2f64_nxv2bf16(<vscale x 2 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv2f64_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x bfloat> @llvm.experimental.constrained.fptrunc.nxv2bf16.nxv2f64(<vscale x 2 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x bfloat> %evec
}
196
; Strict fptrunc nxv2f32 -> nxv2bf16: one narrowing step, a single
; vfncvtbf16.f.f.w (Zvfbfmin) at e16/mf2.
declare <vscale x 2 x bfloat> @llvm.experimental.constrained.fptrunc.nxv2bf16.nxv2f32(<vscale x 2 x float>, metadata, metadata)
define <vscale x 2 x bfloat> @vfptrunc_nxv2f32_nxv2bf16(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv2f32_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x bfloat> @llvm.experimental.constrained.fptrunc.nxv2bf16.nxv2f32(<vscale x 2 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x bfloat> %evec
}
208
; Strict fptrunc nxv4f64 -> nxv4bf16: f64->f32 via vfncvt.rod.f.f.w
; (round-to-odd, preserving inexactness for the final rounding), then
; f32->bf16 via vfncvtbf16.f.f.w from Zvfbfmin.
declare <vscale x 4 x bfloat> @llvm.experimental.constrained.fptrunc.nxv4bf16.nxv4f64(<vscale x 4 x double>, metadata, metadata)
define <vscale x 4 x bfloat> @vfptrunc_nxv4f64_nxv4bf16(<vscale x 4 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv4f64_nxv4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x bfloat> @llvm.experimental.constrained.fptrunc.nxv4bf16.nxv4f64(<vscale x 4 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x bfloat> %evec
}
221
; Strict fptrunc nxv4f32 -> nxv4bf16: one narrowing step, a single
; vfncvtbf16.f.f.w (Zvfbfmin) at e16/m1.
declare <vscale x 4 x bfloat> @llvm.experimental.constrained.fptrunc.nxv4bf16.nxv4f32(<vscale x 4 x float>, metadata, metadata)
define <vscale x 4 x bfloat> @vfptrunc_nxv4f32_nxv4bf16(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv4f32_nxv4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x bfloat> @llvm.experimental.constrained.fptrunc.nxv4bf16.nxv4f32(<vscale x 4 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x bfloat> %evec
}
233
; Strict fptrunc nxv8f64 -> nxv8bf16: f64->f32 via vfncvt.rod.f.f.w
; (round-to-odd, preserving inexactness for the final rounding), then
; f32->bf16 via vfncvtbf16.f.f.w from Zvfbfmin.
declare <vscale x 8 x bfloat> @llvm.experimental.constrained.fptrunc.nxv8bf16.nxv8f64(<vscale x 8 x double>, metadata, metadata)
define <vscale x 8 x bfloat> @vfptrunc_nxv8f64_nxv8bf16(<vscale x 8 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv8f64_nxv8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x bfloat> @llvm.experimental.constrained.fptrunc.nxv8bf16.nxv8f64(<vscale x 8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x bfloat> %evec
}
246
; Strict fptrunc nxv8f32 -> nxv8bf16: one narrowing step, a single
; vfncvtbf16.f.f.w (Zvfbfmin) at e16/m2.
declare <vscale x 8 x bfloat> @llvm.experimental.constrained.fptrunc.nxv8bf16.nxv8f32(<vscale x 8 x float>, metadata, metadata)
define <vscale x 8 x bfloat> @vfptrunc_nxv8f32_nxv8bf16(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_nxv8f32_nxv8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x bfloat> @llvm.experimental.constrained.fptrunc.nxv8bf16.nxv8f32(<vscale x 8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x bfloat> %evec
}
258