; Source: llvm-project llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; vfwadd.wv nxv1f32/nxv1f16: unmasked and masked forms (rounding-mode operand 0).
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwadd.wv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}
56
; vfwadd.wv nxv2f32/nxv2f16: unmasked and masked forms.
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwadd.wv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}
106
; vfwadd.wv nxv4f32/nxv4f16: unmasked and masked forms.
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfwadd.wv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}
156
; vfwadd.wv nxv8f32/nxv8f16: unmasked and masked forms.
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfwadd.wv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}
206
; vfwadd.wv nxv16f32/nxv16f16: unmasked and masked forms. The masked variant's
; half-width source spills to the stack and is reloaded with vl4re16.v.
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfwadd.wv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re16.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
257
; vfwadd.wv nxv1f64/nxv1f32: unmasked and masked forms.
declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfwadd.wv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}
307
; vfwadd.wv nxv2f64/nxv2f32: unmasked and masked forms.
declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwadd.wv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}
357
; vfwadd.wv nxv4f64/nxv4f32: unmasked and masked forms.
declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfwadd.wv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}
407
; vfwadd.wv nxv8f64/nxv8f32: unmasked and masked forms. The masked variant's
; single-width source is reloaded from the stack with vl4re32.v.
declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfwadd.wv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re32.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
458
; vfwadd.wf nxv1f32 + scalar f16: unmasked and masked forms.
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  half,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}
508
; vfwadd.wf nxv2f32 + scalar f16: unmasked and masked forms.
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  half,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  half,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}
558
; vfwadd.wf nxv4f32 + scalar f16: unmasked and masked forms.
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  half,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  half,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}
608
; vfwadd.wf nxv8f32 + scalar f16: unmasked and masked forms.
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  half,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  half,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}
658
; vfwadd.wf nxv16f32 + scalar f16: unmasked and masked forms.
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  half,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  half,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
708
; vfwadd.wf nxv1f64 + scalar f32: unmasked and masked forms.
declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  float,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  float,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}
758
; vfwadd.wf nxv2f64 + scalar f32: unmasked and masked forms.
declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  float,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  float,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}
808
; vfwadd.wf nxv4f64 + scalar f32: unmasked and masked forms.
declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  float,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  float,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}
858
; Unmasked vfwadd.wf: widening add of an f32 scalar to an nxv8f64 wide operand
; (passthru undef, RM operand 0, vl).
declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  float,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}
882
; Masked vfwadd.wf: widening add of an f32 scalar to an nxv8f64 wide operand,
; with mask, RM operand (0), vl, and tail/mask policy operand.
declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  float,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
908
; Masked vfwadd.wv with the passthru tied to the wide source (%0 passed twice).
define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}
927
; Masked vfwadd.wv with the passthru tied to the wide source (%0 passed twice).
define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x float> %a
}
946
; Masked vfwadd.wv with the passthru tied to the wide source (%0 passed twice).
define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v8, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x float> %a
}
965
; Masked vfwadd.wv with the passthru tied to the wide source (%0 passed twice).
define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v8, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x float> %a
}
984
; Masked vfwadd.wv with the passthru tied to the wide source (%0 passed twice).
define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v8, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 16 x float> %a
}
1003
; Masked vfwadd.wv with the passthru tied to the wide source (%0 passed twice).
define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x double> %a
}
1022
; Masked vfwadd.wv with the passthru tied to the wide source (%0 passed twice).
define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v8, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x double> %a
}
1041
; Masked vfwadd.wv with the passthru tied to the wide source (%0 passed twice).
define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v8, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x double> %a
}
1060
; Masked vfwadd.wv with the passthru tied to the wide source (%0 passed twice).
define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfwadd.wv v8, v8, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x double> %a
}
1079
; Masked vfwadd.wf with the passthru tied to the wide source (%0 passed twice).
define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %0,
    half %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}
1098
; Masked vfwadd.wf with the passthru tied to the wide source (%0 passed twice).
define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %0,
    half %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x float> %a
}
1117
; Masked vfwadd.wf with the passthru tied to the wide source (%0 passed twice).
define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %0,
    half %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x float> %a
}
1136
; Masked vfwadd.wf with the passthru tied to the wide source (%0 passed twice).
define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %0,
    half %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x float> %a
}
1155
; Masked vfwadd.wf with the passthru tied to the wide source (%0 passed twice).
define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %0,
    half %1,
    <vscale x 16 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 16 x float> %a
}
1174
; Masked vfwadd.wf with the passthru tied to the wide source (%0 passed twice).
define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %0,
    float %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x double> %a
}
1193
; Masked vfwadd.wf with the passthru tied to the wide source (%0 passed twice).
define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %0,
    float %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x double> %a
}
1212
; Masked vfwadd.wf with the passthru tied to the wide source (%0 passed twice).
define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %0,
    float %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x double> %a
}
1231
; Masked vfwadd.wf with the passthru tied to the wide source (%0 passed twice).
define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %0,
    float %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x double> %a
}
1250
; Unmasked vfwadd.wv with operands arriving in swapped registers, so the result
; lands in a scratch register and is copied back (vmv1r.v in CHECK lines).
define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwadd.wv v10, v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %1,
    <vscale x 1 x half> %0,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}
1269
; Unmasked vfwadd.wv with operands arriving in swapped registers, so the result
; lands in a scratch register and is copied back (vmv1r.v in CHECK lines).
define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwadd.wv v10, v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %1,
    <vscale x 2 x half> %0,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}
1288
; Unmasked vfwadd.wv with operands arriving in swapped registers, so the result
; lands in a scratch register and is copied back (vmv2r.v in CHECK lines).
define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfwadd.wv v12, v10, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %1,
    <vscale x 4 x half> %0,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}
1307
; Unmasked vfwadd.wv with operands arriving in swapped registers, so the result
; lands in a scratch register and is copied back (vmv4r.v in CHECK lines).
define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfwadd.wv v16, v12, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %1,
    <vscale x 8 x half> %0,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}
1326
; Unmasked vfwadd.wv with operands arriving in swapped registers, so the result
; lands in a scratch register and is copied back (vmv1r.v in CHECK lines).
define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfwadd.wv v10, v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %1,
    <vscale x 1 x float> %0,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}
1345
; Unmasked vfwadd.wv with operands arriving in swapped registers, so the result
; lands in a scratch register and is copied back (vmv2r.v in CHECK lines).
define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwadd.wv v12, v10, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %1,
    <vscale x 2 x float> %0,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}
1364
; Unmasked vfwadd.wv with operands arriving in swapped registers, so the result
; lands in a scratch register and is copied back (vmv4r.v in CHECK lines).
define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfwadd.wv v16, v12, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %1,
    <vscale x 4 x float> %0,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}
1383
; Unmasked vfwadd.wv with operands arriving in swapped registers, so the result
; lands in a scratch register and is copied back (vmv8r.v in CHECK lines).
define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfwadd.wv v24, v16, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %1,
    <vscale x 8 x float> %0,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}
1402