; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

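; This file tests the llvm.riscv.vfwmul intrinsics (widening floating-point
; multiply) in unmasked and masked, vector-vector (vv) and vector-scalar (vf)
; forms. The trailing iXLen operands are the rounding mode (0 = rne, set via
; fsrmi/fsrm in the generated code) and the vector length; masked variants
; take an additional policy operand (iXLen 1 = tail agnostic, mask
; undisturbed, matching the "ta, mu" vsetvli in the CHECK lines).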
declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwmul.vv v10, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwmul.vv v10, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
    <vscale x 2 x float> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfwmul.vv v10, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
    <vscale x 4 x float> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfwmul.vv v12, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
    <vscale x 8 x float> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfwmul.vv v16, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
    <vscale x 16 x float> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

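; The vv tests above widen f16 sources to f32 results; the following vv tests
; widen f32 sources to f64 results (e32 element width in the vsetvli).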
declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfwmul.vv v10, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
    <vscale x 1 x double> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwmul.vv v10, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
    <vscale x 2 x double> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfwmul.vv v12, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
    <vscale x 4 x double> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfwmul.vv v16, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
    <vscale x 8 x double> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}

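; The remaining tests exercise the vector-scalar (vfwmul.vf) form, where the
; second operand is a scalar half/float passed in fa0 and implicitly splatted.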
declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwmul.vf v9, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  half,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwmul.vf v9, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
    <vscale x 2 x float> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfwmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  half,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfwmul.vf v10, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
    <vscale x 4 x float> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfwmul.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  half,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfwmul.vf v12, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
    <vscale x 8 x float> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfwmul.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  half,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfwmul.vf v16, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
    <vscale x 16 x float> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfwmul.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

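; vfwmul.vf tests widening f32 vector sources against a scalar float to f64.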
declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  float,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfwmul.vf v9, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
    <vscale x 1 x double> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfwmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  float,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwmul.vf v10, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
    <vscale x 2 x double> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwmul.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  float,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfwmul.vf v12, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
    <vscale x 4 x double> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfwmul.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  float,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfwmul.vf v16, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
    <vscale x 8 x double> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfwmul.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}