; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

; Strict f16->f32 extend lowers to a single vfwcvt.f.f.v widening convert.
declare <vscale x 1 x float> @llvm.experimental.constrained.fpext.nxv1f32.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x float> @vfpext_nxv1f16_nxv1f32(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfpext_nxv1f16_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

; Strict f16->f64 extend needs two widening steps: f16->f32, then f32->f64.
declare <vscale x 1 x double> @llvm.experimental.constrained.fpext.nxv1f64.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x double> @vfpext_nxv1f16_nxv1f64(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfpext_nxv1f16_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.fpext.nxv1f64.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

; Strict f16->f32 extend at mf2: single vfwcvt.f.f.v.
declare <vscale x 2 x float> @llvm.experimental.constrained.fpext.nxv2f32.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x float> @vfpext_nxv2f16_nxv2f32(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfpext_nxv2f16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

; Strict f16->f64 extend: two widening converts (f16->f32, f32->f64).
declare <vscale x 2 x double> @llvm.experimental.constrained.fpext.nxv2f64.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x double> @vfpext_nxv2f16_nxv2f64(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfpext_nxv2f16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

; Strict f16->f32 extend at m1: single vfwcvt.f.f.v into an m2 result.
declare <vscale x 4 x float> @llvm.experimental.constrained.fpext.nxv4f32.nxv4f16(<vscale x 4 x half>, metadata)
define <vscale x 4 x float> @vfpext_nxv4f16_nxv4f32(<vscale x 4 x half> %va) strictfp {
; CHECK-LABEL: vfpext_nxv4f16_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

; Strict f16->f64 extend: two widening converts (f16->f32, f32->f64).
declare <vscale x 4 x double> @llvm.experimental.constrained.fpext.nxv4f64.nxv4f16(<vscale x 4 x half>, metadata)
define <vscale x 4 x double> @vfpext_nxv4f16_nxv4f64(<vscale x 4 x half> %va) strictfp {
; CHECK-LABEL: vfpext_nxv4f16_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.fpext.nxv4f64.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

; Strict f16->f32 extend at m2: single vfwcvt.f.f.v into an m4 result.
declare <vscale x 8 x float> @llvm.experimental.constrained.fpext.nxv8f32.nxv8f16(<vscale x 8 x half>, metadata)
define <vscale x 8 x float> @vfpext_nxv8f16_nxv8f32(<vscale x 8 x half> %va) strictfp {
; CHECK-LABEL: vfpext_nxv8f16_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

; Strict f16->f64 extend: two widening converts (f16->f32, f32->f64).
declare <vscale x 8 x double> @llvm.experimental.constrained.fpext.nxv8f64.nxv8f16(<vscale x 8 x half>, metadata)
define <vscale x 8 x double> @vfpext_nxv8f16_nxv8f64(<vscale x 8 x half> %va) strictfp {
; CHECK-LABEL: vfpext_nxv8f16_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.fpext.nxv8f64.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

; Strict f32->f64 extend: single vfwcvt.f.f.v widening convert.
declare <vscale x 1 x double> @llvm.experimental.constrained.fpext.nxv1f64.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x double> @vfpext_nxv1f32_nxv1f64(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfpext_nxv1f32_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

; Strict f32->f64 extend at m1: single vfwcvt.f.f.v into an m2 result.
declare <vscale x 2 x double> @llvm.experimental.constrained.fpext.nxv2f64.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x double> @vfpext_nxv2f32_nxv2f64(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfpext_nxv2f32_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

; Strict f32->f64 extend at m2: single vfwcvt.f.f.v into an m4 result.
declare <vscale x 4 x double> @llvm.experimental.constrained.fpext.nxv4f64.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x double> @vfpext_nxv4f32_nxv4f64(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfpext_nxv4f32_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

; Strict f32->f64 extend at m4: single vfwcvt.f.f.v into an m8 result.
declare <vscale x 8 x double> @llvm.experimental.constrained.fpext.nxv8f64.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x double> @vfpext_nxv8f32_nxv8f64(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfpext_nxv8f32_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

; Strict bf16->f32 extend uses the zvfbfmin vfwcvtbf16.f.f.v instruction.
declare <vscale x 1 x float> @llvm.experimental.constrained.fpext.nxv1f32.nxv1bf16(<vscale x 1 x bfloat>, metadata)
define <vscale x 1 x float> @vfpext_nxv1bf16_nxv1f32(<vscale x 1 x bfloat> %va) strictfp {
; CHECK-LABEL: vfpext_nxv1bf16_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.fpext.nxv1f32.nxv1bf16(<vscale x 1 x bfloat> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

; Strict bf16->f64 extend: vfwcvtbf16.f.f.v to f32, then vfwcvt.f.f.v to f64.
declare <vscale x 1 x double> @llvm.experimental.constrained.fpext.nxv1f64.nxv1bf16(<vscale x 1 x bfloat>, metadata)
define <vscale x 1 x double> @vfpext_nxv1bf16_nxv1f64(<vscale x 1 x bfloat> %va) strictfp {
; CHECK-LABEL: vfpext_nxv1bf16_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.fpext.nxv1f64.nxv1bf16(<vscale x 1 x bfloat> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

; Strict bf16->f32 extend at mf2: single vfwcvtbf16.f.f.v.
declare <vscale x 2 x float> @llvm.experimental.constrained.fpext.nxv2f32.nxv2bf16(<vscale x 2 x bfloat>, metadata)
define <vscale x 2 x float> @vfpext_nxv2bf16_nxv2f32(<vscale x 2 x bfloat> %va) strictfp {
; CHECK-LABEL: vfpext_nxv2bf16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.fpext.nxv2f32.nxv2bf16(<vscale x 2 x bfloat> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

; Strict bf16->f64 extend: vfwcvtbf16.f.f.v to f32, then vfwcvt.f.f.v to f64.
declare <vscale x 2 x double> @llvm.experimental.constrained.fpext.nxv2f64.nxv2bf16(<vscale x 2 x bfloat>, metadata)
define <vscale x 2 x double> @vfpext_nxv2bf16_nxv2f64(<vscale x 2 x bfloat> %va) strictfp {
; CHECK-LABEL: vfpext_nxv2bf16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.fpext.nxv2f64.nxv2bf16(<vscale x 2 x bfloat> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

; Strict bf16->f32 extend at m1: single vfwcvtbf16.f.f.v into an m2 result.
declare <vscale x 4 x float> @llvm.experimental.constrained.fpext.nxv4f32.nxv4bf16(<vscale x 4 x bfloat>, metadata)
define <vscale x 4 x float> @vfpext_nxv4bf16_nxv4f32(<vscale x 4 x bfloat> %va) strictfp {
; CHECK-LABEL: vfpext_nxv4bf16_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.fpext.nxv4f32.nxv4bf16(<vscale x 4 x bfloat> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

; Strict bf16->f64 extend: vfwcvtbf16.f.f.v to f32, then vfwcvt.f.f.v to f64.
declare <vscale x 4 x double> @llvm.experimental.constrained.fpext.nxv4f64.nxv4bf16(<vscale x 4 x bfloat>, metadata)
define <vscale x 4 x double> @vfpext_nxv4bf16_nxv4f64(<vscale x 4 x bfloat> %va) strictfp {
; CHECK-LABEL: vfpext_nxv4bf16_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.fpext.nxv4f64.nxv4bf16(<vscale x 4 x bfloat> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

; Strict bf16->f32 extend at m2: single vfwcvtbf16.f.f.v into an m4 result.
declare <vscale x 8 x float> @llvm.experimental.constrained.fpext.nxv8f32.nxv8bf16(<vscale x 8 x bfloat>, metadata)
define <vscale x 8 x float> @vfpext_nxv8bf16_nxv8f32(<vscale x 8 x bfloat> %va) strictfp {
; CHECK-LABEL: vfpext_nxv8bf16_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.fpext.nxv8f32.nxv8bf16(<vscale x 8 x bfloat> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

; Strict bf16->f64 extend: vfwcvtbf16.f.f.v to f32, then vfwcvt.f.f.v to f64.
declare <vscale x 8 x double> @llvm.experimental.constrained.fpext.nxv8f64.nxv8bf16(<vscale x 8 x bfloat>, metadata)
define <vscale x 8 x double> @vfpext_nxv8bf16_nxv8f64(<vscale x 8 x bfloat> %va) strictfp {
; CHECK-LABEL: vfpext_nxv8bf16_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.fpext.nxv8f64.nxv8bf16(<vscale x 8 x bfloat> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}
