; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

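; NOTE: The three trailing iXLen operands of each intrinsic are, in order, the
; rounding mode (0 = rne, installed into frm around the instruction by the
; fsrmi/fsrm pair in the checks), the AVL, and the tail/mask policy (0 keeps
; tail and mask elements undisturbed, hence the 'tu' vsetvli checks below).
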
declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half>  @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half>  @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half>  @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half>  @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half>  @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half>  @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half>  @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half>  @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half>  @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half>  @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float>  @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float>  @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float>  @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float>  @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float>  @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float>  @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float>  @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float>  @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double>  @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double>  @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double>  @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double>  @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double>  @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double>  @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x double> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half>  @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  <vscale x 2 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half>  @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    <vscale x 2 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  <vscale x 4 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half>  @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  <vscale x 8 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half>  @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    <vscale x 8 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  <vscale x 16 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half>  @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    <vscale x 16 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float>  @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    <vscale x 1 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  <vscale x 2 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float>  @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  <vscale x 4 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float>  @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    <vscale x 4 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  <vscale x 8 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float>  @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    <vscale x 8 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  <vscale x 1 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double>  @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  <vscale x 2 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double>  @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    <vscale x 2 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  <vscale x 4 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double>  @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    <vscale x 4 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x double> %a
}
