xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=ilp32d \
3; RUN:     -verify-machineinstrs < %s | FileCheck %s
4; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=lp64d \
5; RUN:     -verify-machineinstrs < %s | FileCheck %s
6; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=ilp32d \
7; RUN:     -verify-machineinstrs < %s | FileCheck %s
8; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=lp64d \
9; RUN:     -verify-machineinstrs < %s | FileCheck %s
10
; Strict (constrained, dynamic-rounding) fptrunc <2 x double> -> <2 x float>.
; Single-step narrowing: one vfncvt.f.f.w at e32/mf2, then the result is moved
; back into the v8 return register.
11declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
12define <2 x float> @vfptrunc_v2f64_v2f32(<2 x double> %va) strictfp {
13; CHECK-LABEL: vfptrunc_v2f64_v2f32:
14; CHECK:       # %bb.0:
15; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
16; CHECK-NEXT:    vfncvt.f.f.w v9, v8
17; CHECK-NEXT:    vmv1r.v v8, v9
18; CHECK-NEXT:    ret
19  %evec = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
20  ret <2 x float> %evec
21}
22
; Strict fptrunc <2 x double> -> <2 x half>. Two-step narrowing: first a
; round-toward-odd convert to f32 (vfncvt.rod.f.f.w) -- rod avoids a
; double-rounding error in the f64->f32->f16 chain -- then a normal narrowing
; convert to f16 after switching SEW to e16.
23declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f64(<2 x double>, metadata, metadata)
24define <2 x half> @vfptrunc_v2f64_v2f16(<2 x double> %va) strictfp {
25; CHECK-LABEL: vfptrunc_v2f64_v2f16:
26; CHECK:       # %bb.0:
27; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
28; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
29; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
30; CHECK-NEXT:    vfncvt.f.f.w v8, v9
31; CHECK-NEXT:    ret
32  %evec = call <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f64(<2 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
33  ret <2 x half> %evec
34}
35
; Strict fptrunc <2 x float> -> <2 x half>: a single narrowing convert at
; e16/mf4 (one SEW step, so no round-toward-odd intermediate is needed).
36declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float>, metadata, metadata)
37define <2 x half> @vfptrunc_v2f32_v2f16(<2 x float> %va) strictfp {
38; CHECK-LABEL: vfptrunc_v2f32_v2f16:
39; CHECK:       # %bb.0:
40; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
41; CHECK-NEXT:    vfncvt.f.f.w v9, v8
42; CHECK-NEXT:    vmv1r.v v8, v9
43; CHECK-NEXT:    ret
44  %evec = call <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
45  ret <2 x half> %evec
46}
47
; Strict fptrunc <4 x double> -> <4 x float>: single narrowing convert at
; e32/m1. Source occupies an m2 group (v8-v9), so the m1 result lands in v10
; and is copied back with a whole-register-sized vmv.v.v.
48declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata)
49define <4 x float> @vfptrunc_v4f64_v4f32(<4 x double> %va) strictfp {
50; CHECK-LABEL: vfptrunc_v4f64_v4f32:
51; CHECK:       # %bb.0:
52; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
53; CHECK-NEXT:    vfncvt.f.f.w v10, v8
54; CHECK-NEXT:    vmv.v.v v8, v10
55; CHECK-NEXT:    ret
56  %evec = call <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
57  ret <4 x float> %evec
58}
59
; Strict fptrunc <4 x double> -> <4 x half>: two-step narrowing via a
; round-toward-odd f64->f32 convert (avoids double rounding), then f32->f16
; after switching to e16/mf2.
60declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata)
61define <4 x half> @vfptrunc_v4f64_v4f16(<4 x double> %va) strictfp {
62; CHECK-LABEL: vfptrunc_v4f64_v4f16:
63; CHECK:       # %bb.0:
64; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
65; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
66; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
67; CHECK-NEXT:    vfncvt.f.f.w v8, v10
68; CHECK-NEXT:    ret
69  %evec = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
70  ret <4 x half> %evec
71}
72
; Strict fptrunc <4 x float> -> <4 x half>: single narrowing convert at
; e16/mf2.
73declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float>, metadata, metadata)
74define <4 x half> @vfptrunc_v4f32_v4f16(<4 x float> %va) strictfp {
75; CHECK-LABEL: vfptrunc_v4f32_v4f16:
76; CHECK:       # %bb.0:
77; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
78; CHECK-NEXT:    vfncvt.f.f.w v9, v8
79; CHECK-NEXT:    vmv1r.v v8, v9
80; CHECK-NEXT:    ret
81  %evec = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
82  ret <4 x half> %evec
83}
84
; Strict fptrunc <8 x double> -> <8 x float>: single narrowing convert at
; e32/m2 (source is an m4 group v8-v11, result group v12-v13 copied back).
85declare <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(<8 x double>, metadata, metadata)
86define <8 x float> @vfptrunc_v8f64_v8f32(<8 x double> %va) strictfp {
87; CHECK-LABEL: vfptrunc_v8f64_v8f32:
88; CHECK:       # %bb.0:
89; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
90; CHECK-NEXT:    vfncvt.f.f.w v12, v8
91; CHECK-NEXT:    vmv.v.v v8, v12
92; CHECK-NEXT:    ret
93  %evec = call <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(<8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
94  ret <8 x float> %evec
95}
96
; Strict fptrunc <8 x double> -> <8 x half>: two-step narrowing via
; round-toward-odd f64->f32 (avoids double rounding), then f32->f16 at e16/m1.
97declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f64(<8 x double>, metadata, metadata)
98define <8 x half> @vfptrunc_v8f64_v8f16(<8 x double> %va) strictfp {
99; CHECK-LABEL: vfptrunc_v8f64_v8f16:
100; CHECK:       # %bb.0:
101; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
102; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
103; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
104; CHECK-NEXT:    vfncvt.f.f.w v8, v12
105; CHECK-NEXT:    ret
106  %evec = call <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f64(<8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
107  ret <8 x half> %evec
108}
109
; Strict fptrunc <8 x float> -> <8 x half>: single narrowing convert at
; e16/m1.
110declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata)
111define <8 x half> @vfptrunc_v8f32_v8f16(<8 x float> %va) strictfp {
112; CHECK-LABEL: vfptrunc_v8f32_v8f16:
113; CHECK:       # %bb.0:
114; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
115; CHECK-NEXT:    vfncvt.f.f.w v10, v8
116; CHECK-NEXT:    vmv.v.v v8, v10
117; CHECK-NEXT:    ret
118  %evec = call <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
119  ret <8 x half> %evec
120}
121
; Strict fptrunc <2 x double> -> <2 x bfloat> (requires +zvfbfmin): two-step
; narrowing via round-toward-odd f64->f32 (avoids double rounding), then the
; bf16-specific narrowing convert vfncvtbf16.f.f.w at e16/mf4.
122declare <2 x bfloat> @llvm.experimental.constrained.fptrunc.v2bf16.v2f64(<2 x double>, metadata, metadata)
123define <2 x bfloat> @vfptrunc_v2f64_v2bf16(<2 x double> %va) strictfp {
124; CHECK-LABEL: vfptrunc_v2f64_v2bf16:
125; CHECK:       # %bb.0:
126; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
127; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
128; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
129; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
130; CHECK-NEXT:    ret
131  %evec = call <2 x bfloat> @llvm.experimental.constrained.fptrunc.v2bf16.v2f64(<2 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
132  ret <2 x bfloat> %evec
133}
134
; Strict fptrunc <2 x float> -> <2 x bfloat>: single bf16 narrowing convert
; (vfncvtbf16.f.f.w) at e16/mf4.
135declare <2 x bfloat> @llvm.experimental.constrained.fptrunc.v2bf16.v2f32(<2 x float>, metadata, metadata)
136define <2 x bfloat> @vfptrunc_v2f32_v2bf16(<2 x float> %va) strictfp {
137; CHECK-LABEL: vfptrunc_v2f32_v2bf16:
138; CHECK:       # %bb.0:
139; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
140; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
141; CHECK-NEXT:    vmv1r.v v8, v9
142; CHECK-NEXT:    ret
143  %evec = call <2 x bfloat> @llvm.experimental.constrained.fptrunc.v2bf16.v2f32(<2 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
144  ret <2 x bfloat> %evec
145}
146
; Strict fptrunc <4 x double> -> <4 x bfloat>: round-toward-odd f64->f32
; (avoids double rounding), then vfncvtbf16.f.f.w at e16/mf2.
147declare <4 x bfloat> @llvm.experimental.constrained.fptrunc.v4bf16.v4f64(<4 x double>, metadata, metadata)
148define <4 x bfloat> @vfptrunc_v4f64_v4bf16(<4 x double> %va) strictfp {
149; CHECK-LABEL: vfptrunc_v4f64_v4bf16:
150; CHECK:       # %bb.0:
151; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
152; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
153; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
154; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
155; CHECK-NEXT:    ret
156  %evec = call <4 x bfloat> @llvm.experimental.constrained.fptrunc.v4bf16.v4f64(<4 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
157  ret <4 x bfloat> %evec
158}
159
; Strict fptrunc <4 x float> -> <4 x bfloat>: single bf16 narrowing convert at
; e16/mf2.
160declare <4 x bfloat> @llvm.experimental.constrained.fptrunc.v4bf16.v4f32(<4 x float>, metadata, metadata)
161define <4 x bfloat> @vfptrunc_v4f32_v4bf16(<4 x float> %va) strictfp {
162; CHECK-LABEL: vfptrunc_v4f32_v4bf16:
163; CHECK:       # %bb.0:
164; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
165; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
166; CHECK-NEXT:    vmv1r.v v8, v9
167; CHECK-NEXT:    ret
168  %evec = call <4 x bfloat> @llvm.experimental.constrained.fptrunc.v4bf16.v4f32(<4 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
169  ret <4 x bfloat> %evec
170}
171
; Strict fptrunc <8 x double> -> <8 x bfloat>: round-toward-odd f64->f32
; (avoids double rounding), then vfncvtbf16.f.f.w at e16/m1.
172declare <8 x bfloat> @llvm.experimental.constrained.fptrunc.v8bf16.v8f64(<8 x double>, metadata, metadata)
173define <8 x bfloat> @vfptrunc_v8f64_v8bf16(<8 x double> %va) strictfp {
174; CHECK-LABEL: vfptrunc_v8f64_v8bf16:
175; CHECK:       # %bb.0:
176; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
177; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
178; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
179; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
180; CHECK-NEXT:    ret
181  %evec = call <8 x bfloat> @llvm.experimental.constrained.fptrunc.v8bf16.v8f64(<8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
182  ret <8 x bfloat> %evec
183}
184
; Strict fptrunc <8 x float> -> <8 x bfloat>: single bf16 narrowing convert at
; e16/m1 (source m2 group v8-v9; m1 result copied back with vmv.v.v).
185declare <8 x bfloat> @llvm.experimental.constrained.fptrunc.v8bf16.v8f32(<8 x float>, metadata, metadata)
186define <8 x bfloat> @vfptrunc_v8f32_v8bf16(<8 x float> %va) strictfp {
187; CHECK-LABEL: vfptrunc_v8f32_v8bf16:
188; CHECK:       # %bb.0:
189; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
190; CHECK-NEXT:    vfncvtbf16.f.f.w v10, v8
191; CHECK-NEXT:    vmv.v.v v8, v10
192; CHECK-NEXT:    ret
193  %evec = call <8 x bfloat> @llvm.experimental.constrained.fptrunc.v8bf16.v8f32(<8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
194  ret <8 x bfloat> %evec
195}
196