; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
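; Reading aid (not part of the autogenerated checks): each unmasked intrinsic
; below takes a passthru operand (undef here), two source operands, a
; rounding-mode operand, and a vl operand; the masked form adds a mask and a
; trailing policy operand. Rounding mode 7 selects the dynamic frm, and
; policy 1 is tail-agnostic/mask-undisturbed, matching the "ta, mu" in the
; vsetvli lines of the masked tests.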

declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x half> %2,
    <vscale x 32 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x float> %2,
    <vscale x 16 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x double> %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}

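; The tests below switch from the vector-vector (vfsub.vv) form to the
; vector-scalar (vfsub.vf) form, where the second source operand is a scalar
; passed in fa0 rather than a vector register.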
declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen 7, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
