xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=ilp32d \
3; RUN:     -verify-machineinstrs < %s | FileCheck %s
4; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=lp64d \
5; RUN:     -verify-machineinstrs < %s | FileCheck %s
6; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=ilp32d \
7; RUN:     -verify-machineinstrs < %s | FileCheck %s
8; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=lp64d \
9; RUN:     -verify-machineinstrs < %s | FileCheck %s
10
; f32 -> f16 narrows in a single vfncvt.f.f.w step; vsetvli picks SEW=e16, LMUL=mf4
; (result element type/size), and the result is copied back into v8.
11define <vscale x 1 x half> @vfptrunc_nxv1f32_nxv1f16(<vscale x 1 x float> %va) {
12;
13; CHECK-LABEL: vfptrunc_nxv1f32_nxv1f16:
14; CHECK:       # %bb.0:
15; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
16; CHECK-NEXT:    vfncvt.f.f.w v9, v8
17; CHECK-NEXT:    vmv1r.v v8, v9
18; CHECK-NEXT:    ret
19  %evec = fptrunc <vscale x 1 x float> %va to <vscale x 1 x half>
20  ret <vscale x 1 x half> %evec
21}
22
; Same single-step f32 -> f16 narrowing as above, one LMUL size up (e16/mf2).
23define <vscale x 2 x half> @vfptrunc_nxv2f32_nxv2f16(<vscale x 2 x float> %va) {
24;
25; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f16:
26; CHECK:       # %bb.0:
27; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
28; CHECK-NEXT:    vfncvt.f.f.w v9, v8
29; CHECK-NEXT:    vmv1r.v v8, v9
30; CHECK-NEXT:    ret
31  %evec = fptrunc <vscale x 2 x float> %va to <vscale x 2 x half>
32  ret <vscale x 2 x half> %evec
33}
34
; f32 -> f16 at e16/m1; with a whole-register result the copy back to v8 is a
; vmv.v.v rather than vmv1r.v.
35define <vscale x 4 x half> @vfptrunc_nxv4f32_nxv4f16(<vscale x 4 x float> %va) {
36;
37; CHECK-LABEL: vfptrunc_nxv4f32_nxv4f16:
38; CHECK:       # %bb.0:
39; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
40; CHECK-NEXT:    vfncvt.f.f.w v10, v8
41; CHECK-NEXT:    vmv.v.v v8, v10
42; CHECK-NEXT:    ret
43  %evec = fptrunc <vscale x 4 x float> %va to <vscale x 4 x half>
44  ret <vscale x 4 x half> %evec
45}
46
; f32 -> f16 at e16/m2; source occupies v8m4, result lands in v12m2.
47define <vscale x 8 x half> @vfptrunc_nxv8f32_nxv8f16(<vscale x 8 x float> %va) {
48;
49; CHECK-LABEL: vfptrunc_nxv8f32_nxv8f16:
50; CHECK:       # %bb.0:
51; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
52; CHECK-NEXT:    vfncvt.f.f.w v12, v8
53; CHECK-NEXT:    vmv.v.v v8, v12
54; CHECK-NEXT:    ret
55  %evec = fptrunc <vscale x 8 x float> %va to <vscale x 8 x half>
56  ret <vscale x 8 x half> %evec
57}
58
; Largest f32 -> f16 case exercised here (e16/m4); source is v8m8, result v16m4.
59define <vscale x 16 x half> @vfptrunc_nxv16f32_nxv16f16(<vscale x 16 x float> %va) {
60;
61; CHECK-LABEL: vfptrunc_nxv16f32_nxv16f16:
62; CHECK:       # %bb.0:
63; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
64; CHECK-NEXT:    vfncvt.f.f.w v16, v8
65; CHECK-NEXT:    vmv.v.v v8, v16
66; CHECK-NEXT:    ret
67  %evec = fptrunc <vscale x 16 x float> %va to <vscale x 16 x half>
68  ret <vscale x 16 x half> %evec
69}
70
; f64 -> f16 cannot narrow in one vfncvt step (each step halves SEW), so the
; expected lowering is two steps: f64 -> f32 with vfncvt.rod.f.f.w (round-to-odd,
; which keeps the final f32 -> f16 rounding free of double-rounding error per the
; RVV spec), then f32 -> f16 with vfncvt.f.f.w under a narrower vtype.
71define <vscale x 1 x half> @vfptrunc_nxv1f64_nxv1f16(<vscale x 1 x double> %va) {
72;
73; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f16:
74; CHECK:       # %bb.0:
75; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
76; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
77; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
78; CHECK-NEXT:    vfncvt.f.f.w v8, v9
79; CHECK-NEXT:    ret
80  %evec = fptrunc <vscale x 1 x double> %va to <vscale x 1 x half>
81  ret <vscale x 1 x half> %evec
82}
83
; f64 -> f32 is a single-step narrowing (one SEW halving): one vfncvt.f.f.w at e32/mf2.
84define <vscale x 1 x float> @vfptrunc_nxv1f64_nxv1f32(<vscale x 1 x double> %va) {
85;
86; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f32:
87; CHECK:       # %bb.0:
88; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
89; CHECK-NEXT:    vfncvt.f.f.w v9, v8
90; CHECK-NEXT:    vmv1r.v v8, v9
91; CHECK-NEXT:    ret
92  %evec = fptrunc <vscale x 1 x double> %va to <vscale x 1 x float>
93  ret <vscale x 1 x float> %evec
94}
95
; Two-step f64 -> f16 (round-to-odd intermediate, see nxv1f64 case) at the next
; LMUL size: e32/m1 then e16/mf2.
96define <vscale x 2 x half> @vfptrunc_nxv2f64_nxv2f16(<vscale x 2 x double> %va) {
97;
98; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f16:
99; CHECK:       # %bb.0:
100; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
101; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
102; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
103; CHECK-NEXT:    vfncvt.f.f.w v8, v10
104; CHECK-NEXT:    ret
105  %evec = fptrunc <vscale x 2 x double> %va to <vscale x 2 x half>
106  ret <vscale x 2 x half> %evec
107}
108
; Single-step f64 -> f32 at e32/m1; whole-register result copied back with vmv.v.v.
109define <vscale x 2 x float> @vfptrunc_nxv2f64_nxv2f32(<vscale x 2 x double> %va) {
110;
111; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f32:
112; CHECK:       # %bb.0:
113; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
114; CHECK-NEXT:    vfncvt.f.f.w v10, v8
115; CHECK-NEXT:    vmv.v.v v8, v10
116; CHECK-NEXT:    ret
117  %evec = fptrunc <vscale x 2 x double> %va to <vscale x 2 x float>
118  ret <vscale x 2 x float> %evec
119}
120
; Two-step f64 -> f16 (round-to-odd intermediate) at e32/m2 then e16/m1.
121define <vscale x 4 x half> @vfptrunc_nxv4f64_nxv4f16(<vscale x 4 x double> %va) {
122;
123; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f16:
124; CHECK:       # %bb.0:
125; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
126; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
127; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
128; CHECK-NEXT:    vfncvt.f.f.w v8, v12
129; CHECK-NEXT:    ret
130  %evec = fptrunc <vscale x 4 x double> %va to <vscale x 4 x half>
131  ret <vscale x 4 x half> %evec
132}
133
; Single-step f64 -> f32 at e32/m2; source v8m4, result v12m2.
134define <vscale x 4 x float> @vfptrunc_nxv4f64_nxv4f32(<vscale x 4 x double> %va) {
135;
136; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f32:
137; CHECK:       # %bb.0:
138; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
139; CHECK-NEXT:    vfncvt.f.f.w v12, v8
140; CHECK-NEXT:    vmv.v.v v8, v12
141; CHECK-NEXT:    ret
142  %evec = fptrunc <vscale x 4 x double> %va to <vscale x 4 x float>
143  ret <vscale x 4 x float> %evec
144}
145
; Largest two-step f64 -> f16 case (round-to-odd intermediate): e32/m4 then e16/m2;
; source occupies the full v8m8 group.
146define <vscale x 8 x half> @vfptrunc_nxv8f64_nxv8f16(<vscale x 8 x double> %va) {
147;
148; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f16:
149; CHECK:       # %bb.0:
150; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
151; CHECK-NEXT:    vfncvt.rod.f.f.w v16, v8
152; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
153; CHECK-NEXT:    vfncvt.f.f.w v8, v16
154; CHECK-NEXT:    ret
155  %evec = fptrunc <vscale x 8 x double> %va to <vscale x 8 x half>
156  ret <vscale x 8 x half> %evec
157}
158
; Largest single-step f64 -> f32 case (e32/m4); source v8m8, result v16m4.
159define <vscale x 8 x float> @vfptrunc_nxv8f64_nxv8f32(<vscale x 8 x double> %va) {
160;
161; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f32:
162; CHECK:       # %bb.0:
163; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
164; CHECK-NEXT:    vfncvt.f.f.w v16, v8
165; CHECK-NEXT:    vmv.v.v v8, v16
166; CHECK-NEXT:    ret
167  %evec = fptrunc <vscale x 8 x double> %va to <vscale x 8 x float>
168  ret <vscale x 8 x float> %evec
169}
170
; f32 -> bf16 uses the zvfbfmin narrowing instruction vfncvtbf16.f.f.w (one step,
; e16/mf4); otherwise the same shape as the f32 -> f16 cases above.
171define <vscale x 1 x bfloat> @vfptrunc_nxv1f32_nxv1bf16(<vscale x 1 x float> %va) {
172;
173; CHECK-LABEL: vfptrunc_nxv1f32_nxv1bf16:
174; CHECK:       # %bb.0:
175; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
176; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
177; CHECK-NEXT:    vmv1r.v v8, v9
178; CHECK-NEXT:    ret
179  %evec = fptrunc <vscale x 1 x float> %va to <vscale x 1 x bfloat>
180  ret <vscale x 1 x bfloat> %evec
181}
182
; Single-step f32 -> bf16 via vfncvtbf16.f.f.w at e16/mf2.
183define <vscale x 2 x bfloat> @vfptrunc_nxv2f32_nxv2bf16(<vscale x 2 x float> %va) {
184;
185; CHECK-LABEL: vfptrunc_nxv2f32_nxv2bf16:
186; CHECK:       # %bb.0:
187; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
188; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
189; CHECK-NEXT:    vmv1r.v v8, v9
190; CHECK-NEXT:    ret
191  %evec = fptrunc <vscale x 2 x float> %va to <vscale x 2 x bfloat>
192  ret <vscale x 2 x bfloat> %evec
193}
194
; Single-step f32 -> bf16 via vfncvtbf16.f.f.w at e16/m1; vmv.v.v copy-back.
195define <vscale x 4 x bfloat> @vfptrunc_nxv4f32_nxv4bf16(<vscale x 4 x float> %va) {
196;
197; CHECK-LABEL: vfptrunc_nxv4f32_nxv4bf16:
198; CHECK:       # %bb.0:
199; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
200; CHECK-NEXT:    vfncvtbf16.f.f.w v10, v8
201; CHECK-NEXT:    vmv.v.v v8, v10
202; CHECK-NEXT:    ret
203  %evec = fptrunc <vscale x 4 x float> %va to <vscale x 4 x bfloat>
204  ret <vscale x 4 x bfloat> %evec
205}
206
; Single-step f32 -> bf16 via vfncvtbf16.f.f.w at e16/m2.
207define <vscale x 8 x bfloat> @vfptrunc_nxv8f32_nxv8bf16(<vscale x 8 x float> %va) {
208;
209; CHECK-LABEL: vfptrunc_nxv8f32_nxv8bf16:
210; CHECK:       # %bb.0:
211; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
212; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v8
213; CHECK-NEXT:    vmv.v.v v8, v12
214; CHECK-NEXT:    ret
215  %evec = fptrunc <vscale x 8 x float> %va to <vscale x 8 x bfloat>
216  ret <vscale x 8 x bfloat> %evec
217}
218
; Largest f32 -> bf16 case exercised here (e16/m4); source v8m8, result v16m4.
219define <vscale x 16 x bfloat> @vfptrunc_nxv16f32_nxv16bf16(<vscale x 16 x float> %va) {
220;
221; CHECK-LABEL: vfptrunc_nxv16f32_nxv16bf16:
222; CHECK:       # %bb.0:
223; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
224; CHECK-NEXT:    vfncvtbf16.f.f.w v16, v8
225; CHECK-NEXT:    vmv.v.v v8, v16
226; CHECK-NEXT:    ret
227  %evec = fptrunc <vscale x 16 x float> %va to <vscale x 16 x bfloat>
228  ret <vscale x 16 x bfloat> %evec
229}
230
; f64 -> bf16 mirrors the two-step f64 -> f16 lowering: narrow f64 -> f32 with
; vfncvt.rod.f.f.w (round-to-odd intermediate), then f32 -> bf16 with the
; zvfbfmin instruction vfncvtbf16.f.f.w.
231define <vscale x 1 x bfloat> @vfptrunc_nxv1f64_nxv1bf16(<vscale x 1 x double> %va) {
232;
233; CHECK-LABEL: vfptrunc_nxv1f64_nxv1bf16:
234; CHECK:       # %bb.0:
235; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
236; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
237; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
238; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
239; CHECK-NEXT:    ret
240  %evec = fptrunc <vscale x 1 x double> %va to <vscale x 1 x bfloat>
241  ret <vscale x 1 x bfloat> %evec
242}
243