; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zvfh,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zvfh,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

; This tests a mix of vfnmsac and vfnmsub by using different operand orders to
; trigger commuting in TwoAddressInstructionPass.
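; Per the RVV spec, vfnmsub.vv computes vd = -(vd * vs1) + vs2, overwriting
; the multiplicand, while vfnmsac.vv computes vd = -(vs1 * vs2) + vd,
; overwriting the addend; which form is selected depends on which FMA operand
; ends up tied to the destination register.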

declare <vscale x 1 x half> @llvm.experimental.constrained.fma.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)

define <vscale x 1 x half> @vfnmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT:    vfnmsub.vv v8, v9, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v10
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v12, v10, v11
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 1 x half> %va
  %vd = call <vscale x 1 x half> @llvm.experimental.constrained.fma.nxv1f16(<vscale x 1 x half> %neg, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %vd
}

define <vscale x 1 x half> @vfnmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT:    vfnmsub.vf v8, fa0, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v10, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v12, v9, v11
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 1 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
  %neg = fneg <vscale x 1 x half> %va
  %vd = call <vscale x 1 x half> @llvm.experimental.constrained.fma.nxv1f16(<vscale x 1 x half> %neg, <vscale x 1 x half> %splat, <vscale x 1 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %vd
}

declare <vscale x 2 x half> @llvm.experimental.constrained.fma.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)

define <vscale x 2 x half> @vfnmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT:    vfnmsub.vv v8, v10, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v12, v9, v11
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 2 x half> %va
  %vd = call <vscale x 2 x half> @llvm.experimental.constrained.fma.nxv2f16(<vscale x 2 x half> %neg, <vscale x 2 x half> %vc, <vscale x 2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %vd
}

define <vscale x 2 x half> @vfnmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT:    vfnmsub.vf v8, fa0, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v10, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v12, v9, v11
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 2 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
  %neg = fneg <vscale x 2 x half> %va
  %vd = call <vscale x 2 x half> @llvm.experimental.constrained.fma.nxv2f16(<vscale x 2 x half> %splat, <vscale x 2 x half> %neg, <vscale x 2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %vd
}

declare <vscale x 4 x half> @llvm.experimental.constrained.fma.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)

define <vscale x 4 x half> @vfnmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfnmsub.vv v8, v9, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vxor.vx v9, v9, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v14, v10, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v14
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 4 x half> %vb
  %vd = call <vscale x 4 x half> @llvm.experimental.constrained.fma.nxv4f16(<vscale x 4 x half> %neg, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %vd
}

define <vscale x 4 x half> @vfnmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfnmsub.vf v8, fa0, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v10, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
; ZVFHMIN-NEXT:    vxor.vx v9, v10, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v14, v10, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v14
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 4 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
  %neg = fneg <vscale x 4 x half> %splat
  %vd = call <vscale x 4 x half> @llvm.experimental.constrained.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %neg, <vscale x 4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %vd
}

declare <vscale x 8 x half> @llvm.experimental.constrained.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)

define <vscale x 8 x half> @vfnmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT:    vfnmsac.vv v8, v12, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vxor.vx v8, v10, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v20, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v24, v20, v16
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 8 x half> %vb
  %vd = call <vscale x 8 x half> @llvm.experimental.constrained.fma.nxv8f16(<vscale x 8 x half> %neg, <vscale x 8 x half> %vc, <vscale x 8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %vd
}

define <vscale x 8 x half> @vfnmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT:    vfnmsac.vf v8, fa0, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v12, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vxor.vx v8, v12, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v20, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v20, v12, v16
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v20
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 8 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
  %neg = fneg <vscale x 8 x half> %splat
  %vd = call <vscale x 8 x half> @llvm.experimental.constrained.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %neg, <vscale x 8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %vd
}

declare <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)

define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT:    vfnmsub.vv v8, v16, v12
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vxor.vx v12, v16, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v0, v16, v24
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 16 x half> %vc
  %vd = call <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half> %neg, <vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %vd
}

define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT:    vfnmsub.vf v8, fa0, v12
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v16, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vxor.vx v12, v16, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v0, v16, v24
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 16 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
  %neg = fneg <vscale x 16 x half> %splat
  %vd = call <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half> %neg, <vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %vd
}

declare <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)

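; The nxv32f16 cases cannot be promoted to a single nxv32f32 operation (that
; would need LMUL=16), so the ZVFHMIN lowering splits the vectors in half and
; spills intermediate results to the stack.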
define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vl8re16.v v24, (a0)
; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT:    vfnmsub.vv v8, v24, v16
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi sp, sp, -16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT:    csrr a1, vlenb
; ZVFHMIN-NEXT:    li a2, 24
; ZVFHMIN-NEXT:    mul a1, a1, a2
; ZVFHMIN-NEXT:    sub sp, sp, a1
; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vmv8r.v v0, v16
; ZVFHMIN-NEXT:    csrr a1, vlenb
; ZVFHMIN-NEXT:    slli a1, a1, 4
; ZVFHMIN-NEXT:    add a1, sp, a1
; ZVFHMIN-NEXT:    addi a1, a1, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    csrr a1, vlenb
; ZVFHMIN-NEXT:    slli a1, a1, 3
; ZVFHMIN-NEXT:    add a1, sp, a1
; ZVFHMIN-NEXT:    addi a1, a1, 16
; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    vl8re16.v v24, (a0)
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v0, v24, a0
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v24, v8, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 4
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    addi a0, sp, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT:    addi a0, sp, 16
; ZVFHMIN-NEXT:    vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v16, v8, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    li a1, 24
; ZVFHMIN-NEXT:    mul a0, a0, a1
; ZVFHMIN-NEXT:    add sp, sp, a0
; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT:    addi sp, sp, 16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 32 x half> %vc
  %vd = call <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half> %neg, <vscale x 32 x half> %va, <vscale x 32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %vd
}

define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT:    vfnmsac.vf v8, fa0, v16
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi sp, sp, -16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    li a1, 24
; ZVFHMIN-NEXT:    mul a0, a0, a1
; ZVFHMIN-NEXT:    sub sp, sp, a0
; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vmv8r.v v0, v8
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 4
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vmv.v.x v24, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v0, v24, a0
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v24, v16, v8
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 4
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    addi a0, sp, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT:    addi a0, sp, 16
; ZVFHMIN-NEXT:    vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v16, v8, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    li a1, 24
; ZVFHMIN-NEXT:    mul a0, a0, a1
; ZVFHMIN-NEXT:    add sp, sp, a0
; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT:    addi sp, sp, 16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 32 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
  %neg = fneg <vscale x 32 x half> %splat
  %vd = call <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half> %neg, <vscale x 32 x half> %vb, <vscale x 32 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %vd
}

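; For f32 and f64 elements no promotion is needed, so ZVFH and ZVFHMIN produce
; identical code and the tests below share the common CHECK prefix.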
declare <vscale x 1 x float> @llvm.experimental.constrained.fma.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)

define <vscale x 1 x float> @vfnmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 1 x float> %vb
  %vd = call <vscale x 1 x float> @llvm.experimental.constrained.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %neg, <vscale x 1 x float> %vc, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %vd
}

define <vscale x 1 x float> @vfnmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, float %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x float> poison, float %c, i32 0
  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
  %neg = fneg <vscale x 1 x float> %va
  %vd = call <vscale x 1 x float> @llvm.experimental.constrained.fma.nxv1f32(<vscale x 1 x float> %neg, <vscale x 1 x float> %splat, <vscale x 1 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %vd
}

declare <vscale x 2 x float> @llvm.experimental.constrained.fma.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)

define <vscale x 2 x float> @vfnmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v10, v9
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 2 x float> %vc
  %vd = call <vscale x 2 x float> @llvm.experimental.constrained.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %neg, <vscale x 2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %vd
}

define <vscale x 2 x float> @vfnmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, float %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x float> poison, float %c, i32 0
  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %neg = fneg <vscale x 2 x float> %va
  %vd = call <vscale x 2 x float> @llvm.experimental.constrained.fma.nxv2f32(<vscale x 2 x float> %splat, <vscale x 2 x float> %neg, <vscale x 2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %vd
}

declare <vscale x 4 x float> @llvm.experimental.constrained.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)

define <vscale x 4 x float> @vfnmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 4 x float> %va
  %vd = call <vscale x 4 x float> @llvm.experimental.constrained.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %neg, <vscale x 4 x float> %vc, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %vd
}

define <vscale x 4 x float> @vfnmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, float %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x float> poison, float %c, i32 0
  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
  %neg = fneg <vscale x 4 x float> %splat
  %vd = call <vscale x 4 x float> @llvm.experimental.constrained.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %neg, <vscale x 4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %vd
}

declare <vscale x 8 x float> @llvm.experimental.constrained.fma.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)

define <vscale x 8 x float> @vfnmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfnmsac.vv v8, v16, v12
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 8 x float> %vc
  %vd = call <vscale x 8 x float> @llvm.experimental.constrained.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %neg, <vscale x 8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %vd
}

define <vscale x 8 x float> @vfnmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, float %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfnmsac.vf v8, fa0, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x float> poison, float %c, i32 0
  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
  %neg = fneg <vscale x 8 x float> %splat
  %vd = call <vscale x 8 x float> @llvm.experimental.constrained.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %neg, <vscale x 8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %vd
}

declare <vscale x 16 x float> @llvm.experimental.constrained.fma.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)

define <vscale x 16 x float> @vfnmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v24, v16
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 16 x float> %va
  %vd = call <vscale x 16 x float> @llvm.experimental.constrained.fma.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x float> %neg, <vscale x 16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %vd
}

define <vscale x 16 x float> @vfnmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, float %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x float> poison, float %c, i32 0
  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
  %neg = fneg <vscale x 16 x float> %splat
  %vd = call <vscale x 16 x float> @llvm.experimental.constrained.fma.nxv16f32(<vscale x 16 x float> %neg, <vscale x 16 x float> %va, <vscale x 16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %vd
}

declare <vscale x 1 x double> @llvm.experimental.constrained.fma.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)

define <vscale x 1 x double> @vfnmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfnmsac.vv v8, v10, v9
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 1 x double> %vb
  %vd = call <vscale x 1 x double> @llvm.experimental.constrained.fma.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x double> %neg, <vscale x 1 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %vd
}

define <vscale x 1 x double> @vfnmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, double %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x double> poison, double %c, i32 0
  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
  %neg = fneg <vscale x 1 x double> %va
  %vd = call <vscale x 1 x double> @llvm.experimental.constrained.fma.nxv1f64(<vscale x 1 x double> %neg, <vscale x 1 x double> %splat, <vscale x 1 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %vd
}

declare <vscale x 2 x double> @llvm.experimental.constrained.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)

define <vscale x 2 x double> @vfnmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v12, v10
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 2 x double> %va
  %vd = call <vscale x 2 x double> @llvm.experimental.constrained.fma.nxv2f64(<vscale x 2 x double> %neg, <vscale x 2 x double> %vc, <vscale x 2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %vd
}

define <vscale x 2 x double> @vfnmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, double %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x double> poison, double %c, i32 0
  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
  %neg = fneg <vscale x 2 x double> %va
  %vd = call <vscale x 2 x double> @llvm.experimental.constrained.fma.nxv2f64(<vscale x 2 x double> %splat, <vscale x 2 x double> %neg, <vscale x 2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %vd
}

declare <vscale x 4 x double> @llvm.experimental.constrained.fma.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)

define <vscale x 4 x double> @vfnmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 4 x double> %vb
  %vd = call <vscale x 4 x double> @llvm.experimental.constrained.fma.nxv4f64(<vscale x 4 x double> %neg, <vscale x 4 x double> %va, <vscale x 4 x double> %vc, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %vd
}

define <vscale x 4 x double> @vfnmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, double %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x double> poison, double %c, i32 0
  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
  %neg = fneg <vscale x 4 x double> %splat
  %vd = call <vscale x 4 x double> @llvm.experimental.constrained.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %neg, <vscale x 4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %vd
}

declare <vscale x 8 x double> @llvm.experimental.constrained.fma.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)

define <vscale x 8 x double> @vfnmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfnmsac.vv v8, v16, v24
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 8 x double> %vb
  %vd = call <vscale x 8 x double> @llvm.experimental.constrained.fma.nxv8f64(<vscale x 8 x double> %neg, <vscale x 8 x double> %vc, <vscale x 8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %vd
}

define <vscale x 8 x double> @vfnmsub_vf_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, double %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfnmsac.vf v8, fa0, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x double> poison, double %c, i32 0
  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
  %neg = fneg <vscale x 8 x double> %splat
  %vd = call <vscale x 8 x double> @llvm.experimental.constrained.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %neg, <vscale x 8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %vd
}