; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

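; Tests for the llvm.riscv.vfwnmsac intrinsics, which lower to the RVV
; widening floating-point negate-(multiply-accumulate) instructions
; vfwnmsac.vv / vfwnmsac.vf:
;   vd[i] = -(vs1[i] * vs2[i]) + vd[i]
; with the product and sum computed at twice the source element width.
; The three trailing iXLen operands are the rounding mode (0 = rne, set up
; and restored by the fsrmi/fsrm pair in the CHECK lines), the vector
; length, and the policy (0 = tail undisturbed, and mask undisturbed for
; the masked forms, hence "tu"/"mu" in the expected vsetvli).
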
declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float>  @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float>  @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float>  @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float>  @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float>  @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float>  @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float>  @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float>  @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float>  @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float>  @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 16 x float> %a
}

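; The same vector-vector pattern repeated for f32 -> f64 widening, so the
; expected vsetvli switches to e32 source elements.
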
declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double>  @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double>  @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double>  @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double>  @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double>  @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double>  @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double>  @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double>  @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 8 x double> %a
}

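; The .vf forms take a scalar first multiplicand (passed in fa0) that the
; instruction splats across the vector, so vfwnmsac.vf replaces
; vfwnmsac.vv; otherwise the tests mirror the .vv cases above. First the
; f16 scalar -> f32 widening variants.
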
declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
  <vscale x 1 x float>,
  half,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float>  @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
    <vscale x 1 x float> %0,
    half %1,
    <vscale x 1 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16(
  <vscale x 1 x float>,
  half,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16(
    <vscale x 1 x float> %0,
    half %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16(
  <vscale x 2 x float>,
  half,
  <vscale x 2 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float>  @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16(
    <vscale x 2 x float> %0,
    half %1,
    <vscale x 2 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16(
  <vscale x 2 x float>,
  half,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16(
    <vscale x 2 x float> %0,
    half %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16(
  <vscale x 4 x float>,
  half,
  <vscale x 4 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float>  @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16(
    <vscale x 4 x float> %0,
    half %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16(
  <vscale x 4 x float>,
  half,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16(
    <vscale x 4 x float> %0,
    half %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16(
  <vscale x 8 x float>,
  half,
  <vscale x 8 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float>  @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16(
    <vscale x 8 x float> %0,
    half %1,
    <vscale x 8 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16(
  <vscale x 8 x float>,
  half,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16(
    <vscale x 8 x float> %0,
    half %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16(
  <vscale x 16 x float>,
  half,
  <vscale x 16 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float>  @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16(
    <vscale x 16 x float> %0,
    half %1,
    <vscale x 16 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16(
  <vscale x 16 x float>,
  half,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16(
    <vscale x 16 x float> %0,
    half %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 16 x float> %a
}

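; Finally, the .vf tests for f32 scalar -> f64 widening.
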
declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
  <vscale x 1 x double>,
  float,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double>  @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
    <vscale x 1 x double> %0,
    float %1,
    <vscale x 1 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
  <vscale x 1 x double>,
  float,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
    <vscale x 1 x double> %0,
    float %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
  <vscale x 2 x double>,
  float,
  <vscale x 2 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double>  @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
    <vscale x 2 x double> %0,
    float %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
  <vscale x 2 x double>,
  float,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
    <vscale x 2 x double> %0,
    float %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
  <vscale x 4 x double>,
  float,
  <vscale x 4 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double>  @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
    <vscale x 4 x double> %0,
    float %1,
    <vscale x 4 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
  <vscale x 4 x double>,
  float,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
    <vscale x 4 x double> %0,
    float %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
  <vscale x 8 x double>,
  float,
  <vscale x 8 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double>  @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
    <vscale x 8 x double> %0,
    float %1,
    <vscale x 8 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
  <vscale x 8 x double>,
  float,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
    <vscale x 8 x double> %0,
    float %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0)

  ret <vscale x 8 x double> %a
}