; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vmadd.ll (revision 2967e5f8007d873a3e9d97870d2461d0827a3976)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

; vmadd.vv tests for <vscale x 1 x i8> (SEW=8, LMUL=1/8): unmasked lowers with
; "tu, ma"; the masked variant carries v0.t and uses "tu, mu".
declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i8>  @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8>  @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i8> %a
}

; vmadd.vv tests for <vscale x 2 x i8> (SEW=8, LMUL=1/4): unmasked (tu, ma)
; and masked (tu, mu, v0.t).
declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen,
  iXLen);

define <vscale x 2 x i8>  @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8>  @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i8> %a
}

; vmadd.vv tests for <vscale x 4 x i8> (SEW=8, LMUL=1/2): unmasked (tu, ma)
; and masked (tu, mu, v0.t).
declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen,
  iXLen);

define <vscale x 4 x i8>  @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8>  @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i8> %a
}

; vmadd.vv tests for <vscale x 8 x i8> (SEW=8, LMUL=1): unmasked (tu, ma)
; and masked (tu, mu, v0.t).
declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen,
  iXLen);

define <vscale x 8 x i8>  @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8>  @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i8> %a
}

; vmadd.vv tests for <vscale x 16 x i8> (SEW=8, LMUL=2): operands move to
; even register groups (v10, v12); unmasked (tu, ma), masked (tu, mu, v0.t).
declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  iXLen);

define <vscale x 16 x i8>  @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8>  @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 16 x i8> %a
}

; vmadd.vv tests for <vscale x 32 x i8> (SEW=8, LMUL=4): operands in v12/v16
; register groups; unmasked (tu, ma), masked (tu, mu, v0.t).
declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  iXLen);

define <vscale x 32 x i8>  @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8>  @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 32 x i8> %a
}

; vmadd.vv tests for <vscale x 1 x i16> (SEW=16, LMUL=1/4): unmasked (tu, ma)
; and masked (tu, mu, v0.t).
declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  iXLen);

define <vscale x 1 x i16>  @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16>  @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i16> %a
}

; vmadd.vv tests for <vscale x 2 x i16> (SEW=16, LMUL=1/2): unmasked (tu, ma)
; and masked (tu, mu, v0.t).
declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  iXLen);

define <vscale x 2 x i16>  @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16>  @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i16> %a
}

; vmadd.vv tests for <vscale x 4 x i16> (SEW=16, LMUL=1): unmasked (tu, ma)
; and masked (tu, mu, v0.t).
declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  iXLen);

define <vscale x 4 x i16>  @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16>  @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i16> %a
}

; vmadd.vv tests for <vscale x 8 x i16> (SEW=16, LMUL=2): operands in v10/v12;
; unmasked (tu, ma), masked (tu, mu, v0.t).
declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  iXLen);

define <vscale x 8 x i16>  @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16>  @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i16> %a
}

; vmadd.vv tests for <vscale x 16 x i16> (SEW=16, LMUL=4): operands in
; v12/v16; unmasked (tu, ma), masked (tu, mu, v0.t).
declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  iXLen);

define <vscale x 16 x i16>  @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16>  @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 16 x i16> %a
}

; vmadd.vv tests for <vscale x 1 x i32> (SEW=32, LMUL=1/2): unmasked (tu, ma)
; and masked (tu, mu, v0.t).
declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen);

define <vscale x 1 x i32>  @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32>  @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i32> %a
}

; vmadd.vv tests for <vscale x 2 x i32> (SEW=32, LMUL=1): unmasked (tu, ma)
; and masked (tu, mu, v0.t).
declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen);

define <vscale x 2 x i32>  @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32>  @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i32> %a
}

; vmadd.vv tests for <vscale x 4 x i32> (SEW=32, LMUL=2): operands in v10/v12;
; unmasked (tu, ma), masked (tu, mu, v0.t).
declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen);

define <vscale x 4 x i32>  @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32>  @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i32> %a
}

; vmadd.vv tests for <vscale x 8 x i32> (SEW=32, LMUL=4): operands in v12/v16;
; unmasked (tu, ma), masked (tu, mu, v0.t).
declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen);

define <vscale x 8 x i32>  @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32>  @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i32> %a
}

; vmadd.vv tests for <vscale x 1 x i64> (SEW=64, LMUL=1): unmasked (tu, ma)
; and masked (tu, mu, v0.t).
declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i64>  @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64>  @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i64> %a
}

; vmadd.vv tests for <vscale x 2 x i64> (SEW=64, LMUL=2): operands in v10/v12;
; unmasked (tu, ma), masked (tu, mu, v0.t).
declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  iXLen);

define <vscale x 2 x i64>  @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64>  @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i64> %a
}

; vmadd.vv tests for <vscale x 4 x i64> (SEW=64, LMUL=4): operands in v12/v16;
; unmasked (tu, ma), masked (tu, mu, v0.t).
declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen);

define <vscale x 4 x i64>  @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64>  @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i64> %a
}

; vmadd.vx tests for <vscale x 1 x i8>: i8 scalar operand arrives in a0 and
; the AVL in a1; unmasked (tu, ma), masked (tu, mu, v0.t).
declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i8>  @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i8> %a
}

; vmadd.vx tests for <vscale x 2 x i8>: i8 scalar in a0, AVL in a1;
; unmasked (tu, ma), masked (tu, mu, v0.t).
declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i8>,
  iXLen,
  iXLen);

define <vscale x 2 x i8>  @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i8> %a
}

947declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
948  <vscale x 4 x i8>,
949  i8,
950  <vscale x 4 x i8>,
951  iXLen,
952  iXLen);
953
954define <vscale x 4 x i8>  @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
955; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8:
956; CHECK:       # %bb.0: # %entry
957; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
958; CHECK-NEXT:    vmadd.vx v8, a0, v9
959; CHECK-NEXT:    ret
960entry:
961  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
962    <vscale x 4 x i8> %0,
963    i8 %1,
964    <vscale x 4 x i8> %2,
965    iXLen %3, iXLen 0)
966
967  ret <vscale x 4 x i8> %a
968}
969
970declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
971  <vscale x 4 x i8>,
972  i8,
973  <vscale x 4 x i8>,
974  <vscale x 4 x i1>,
975  iXLen, iXLen);
976
977define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
978; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8:
979; CHECK:       # %bb.0: # %entry
980; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
981; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
982; CHECK-NEXT:    ret
983entry:
984  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
985    <vscale x 4 x i8> %0,
986    i8 %1,
987    <vscale x 4 x i8> %2,
988    <vscale x 4 x i1> %3,
989    iXLen %4, iXLen 0)
990
991  ret <vscale x 4 x i8> %a
992}
993
994declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
995  <vscale x 8 x i8>,
996  i8,
997  <vscale x 8 x i8>,
998  iXLen,
999  iXLen);
1000
1001define <vscale x 8 x i8>  @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
1002; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8:
1003; CHECK:       # %bb.0: # %entry
1004; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
1005; CHECK-NEXT:    vmadd.vx v8, a0, v9
1006; CHECK-NEXT:    ret
1007entry:
1008  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
1009    <vscale x 8 x i8> %0,
1010    i8 %1,
1011    <vscale x 8 x i8> %2,
1012    iXLen %3, iXLen 0)
1013
1014  ret <vscale x 8 x i8> %a
1015}
1016
1017declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
1018  <vscale x 8 x i8>,
1019  i8,
1020  <vscale x 8 x i8>,
1021  <vscale x 8 x i1>,
1022  iXLen, iXLen);
1023
1024define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1025; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8:
1026; CHECK:       # %bb.0: # %entry
1027; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
1028; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
1029; CHECK-NEXT:    ret
1030entry:
1031  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
1032    <vscale x 8 x i8> %0,
1033    i8 %1,
1034    <vscale x 8 x i8> %2,
1035    <vscale x 8 x i1> %3,
1036    iXLen %4, iXLen 0)
1037
1038  ret <vscale x 8 x i8> %a
1039}
1040
1041declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
1042  <vscale x 16 x i8>,
1043  i8,
1044  <vscale x 16 x i8>,
1045  iXLen,
1046  iXLen);
1047
1048define <vscale x 16 x i8>  @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
1049; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8:
1050; CHECK:       # %bb.0: # %entry
1051; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
1052; CHECK-NEXT:    vmadd.vx v8, a0, v10
1053; CHECK-NEXT:    ret
1054entry:
1055  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
1056    <vscale x 16 x i8> %0,
1057    i8 %1,
1058    <vscale x 16 x i8> %2,
1059    iXLen %3, iXLen 0)
1060
1061  ret <vscale x 16 x i8> %a
1062}
1063
1064declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
1065  <vscale x 16 x i8>,
1066  i8,
1067  <vscale x 16 x i8>,
1068  <vscale x 16 x i1>,
1069  iXLen, iXLen);
1070
1071define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1072; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8:
1073; CHECK:       # %bb.0: # %entry
1074; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
1075; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
1076; CHECK-NEXT:    ret
1077entry:
1078  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
1079    <vscale x 16 x i8> %0,
1080    i8 %1,
1081    <vscale x 16 x i8> %2,
1082    <vscale x 16 x i1> %3,
1083    iXLen %4, iXLen 0)
1084
1085  ret <vscale x 16 x i8> %a
1086}
1087
1088declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
1089  <vscale x 32 x i8>,
1090  i8,
1091  <vscale x 32 x i8>,
1092  iXLen,
1093  iXLen);
1094
1095define <vscale x 32 x i8>  @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
1096; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8:
1097; CHECK:       # %bb.0: # %entry
1098; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
1099; CHECK-NEXT:    vmadd.vx v8, a0, v12
1100; CHECK-NEXT:    ret
1101entry:
1102  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
1103    <vscale x 32 x i8> %0,
1104    i8 %1,
1105    <vscale x 32 x i8> %2,
1106    iXLen %3, iXLen 0)
1107
1108  ret <vscale x 32 x i8> %a
1109}
1110
1111declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
1112  <vscale x 32 x i8>,
1113  i8,
1114  <vscale x 32 x i8>,
1115  <vscale x 32 x i1>,
1116  iXLen, iXLen);
1117
1118define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1119; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8:
1120; CHECK:       # %bb.0: # %entry
1121; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
1122; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
1123; CHECK-NEXT:    ret
1124entry:
1125  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
1126    <vscale x 32 x i8> %0,
1127    i8 %1,
1128    <vscale x 32 x i8> %2,
1129    <vscale x 32 x i1> %3,
1130    iXLen %4, iXLen 0)
1131
1132  ret <vscale x 32 x i8> %a
1133}
1134
; vmadd.vx tests for i16 element types, LMUL mf4 through m4, in unmasked
; ("tu, ma") and masked ("tu, mu", mask in v0) flavors.
declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i16>,
  iXLen,
  iXLen);

define <vscale x 1 x i16>  @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    <vscale x 1 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i16>,
  iXLen,
  iXLen);

define <vscale x 2 x i16>  @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    <vscale x 2 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i16>,
  iXLen,
  iXLen);

define <vscale x 4 x i16>  @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    <vscale x 4 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i16>,
  iXLen,
  iXLen);

define <vscale x 8 x i16>  @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    <vscale x 8 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i16>,
  iXLen,
  iXLen);

define <vscale x 16 x i16>  @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    <vscale x 16 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 16 x i16> %a
}
1369
; vmadd.vx tests for i32 element types, LMUL mf2 through m4, in unmasked
; ("tu, ma") and masked ("tu, mu", mask in v0) flavors.
declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i32>,
  iXLen,
  iXLen);

define <vscale x 1 x i32>  @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    <vscale x 1 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i32>,
  iXLen,
  iXLen);

define <vscale x 2 x i32>  @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    <vscale x 2 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i32>,
  iXLen,
  iXLen);

define <vscale x 4 x i32>  @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    <vscale x 4 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i32>,
  iXLen,
  iXLen);

define <vscale x 8 x i32>  @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    <vscale x 8 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i32> %a
}
1557
; vmadd.vx tests for i64 element types, LMUL m1 through m4. These diverge per
; target: on RV32 an i64 scalar does not fit a GPR, so it is spilled to the
; stack as two 32-bit words, splatted with a strided load (vlse64.v, stride
; zero), and vmadd.vv is used; on RV64 vmadd.vx is emitted directly. Hence the
; RV32/RV64 check prefixes instead of the common CHECK prefix.
declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i64>  @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT:    vmadd.vv v8, v10, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT:    vmadd.vx v8, a0, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
; RV32-NEXT:    vmadd.vv v8, v10, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT:    vmadd.vx v8, a0, v9, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i64>,
  iXLen,
  iXLen);

define <vscale x 2 x i64>  @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
; RV32-NEXT:    vmadd.vv v8, v12, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
; RV64-NEXT:    vmadd.vx v8, a0, v10
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    <vscale x 2 x i64> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
; RV32-NEXT:    vmadd.vv v8, v12, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT:    vmadd.vx v8, a0, v10, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i64>,
  iXLen,
  iXLen);

define <vscale x 4 x i64>  @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
; RV32-NEXT:    vmadd.vv v8, v16, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
; RV64-NEXT:    vmadd.vx v8, a0, v12
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    <vscale x 4 x i64> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
; RV32-NEXT:    vmadd.vv v8, v16, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; RV64-NEXT:    vmadd.vx v8, a0, v12, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i64> %a
}
1776