; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll (revision f2bdc29f3e5dd4d8d65081094f8afc789d58706a)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \
; RUN:   -verify-machineinstrs | FileCheck %s

; vwmaccu.vv: vector-vector widening unsigned multiply-accumulate,
; i8 element sources accumulating into i16 element destinations.
; Unmasked forms use vsetvli policy "tu, ma"; masked forms use "tu, mu"
; with the mask in v0.

declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i16>  @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen,
  iXLen);

define <vscale x 2 x i16>  @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen,
  iXLen);

define <vscale x 4 x i16>  @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen,
  iXLen);

define <vscale x 8 x i16>  @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  iXLen);

define <vscale x 16 x i16>  @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  iXLen);

define <vscale x 32 x i16>  @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 32 x i16> %a
}

; vwmaccu.vv: i16 element sources accumulating into i32 element destinations.

declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  iXLen);

define <vscale x 1 x i32>  @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  iXLen);

define <vscale x 2 x i32>  @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  iXLen);

define <vscale x 4 x i32>  @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  iXLen);

define <vscale x 8 x i32>  @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  iXLen);

define <vscale x 16 x i32>  @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 16 x i32> %a
}

; vwmaccu.vv: i32 element sources accumulating into i64 element destinations.

declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen);

define <vscale x 1 x i64>  @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64>  @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen);

define <vscale x 2 x i64>  @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64>  @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen);

define <vscale x 4 x i64>  @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64>  @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen);

define <vscale x 8 x i64>  @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i64>  @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i64> %a
}

; vwmaccu.vx: vector-scalar widening unsigned multiply-accumulate,
; i8 scalar (in a0) times i8 vector elements accumulating into i16 elements.

declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
  <vscale x 1 x i16>,
  i8,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i16>  @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    i8 %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
  <vscale x 1 x i16>,
  i8,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    i8 %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
  <vscale x 2 x i16>,
  i8,
  <vscale x 2 x i8>,
  iXLen,
  iXLen);

define <vscale x 2 x i16>  @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
    <vscale x 2 x i16> %0,
    i8 %1,
    <vscale x 2 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
  <vscale x 2 x i16>,
  i8,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
    <vscale x 2 x i16> %0,
    i8 %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
  <vscale x 4 x i16>,
  i8,
  <vscale x 4 x i8>,
  iXLen,
  iXLen);

define <vscale x 4 x i16>  @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
    <vscale x 4 x i16> %0,
    i8 %1,
    <vscale x 4 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
  <vscale x 4 x i16>,
  i8,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
    <vscale x 4 x i16> %0,
    i8 %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
  <vscale x 8 x i16>,
  i8,
  <vscale x 8 x i8>,
  iXLen,
  iXLen);

define <vscale x 8 x i16>  @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
    <vscale x 8 x i16> %0,
    i8 %1,
    <vscale x 8 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
  <vscale x 8 x i16>,
  i8,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
    <vscale x 8 x i16> %0,
    i8 %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
  <vscale x 16 x i16>,
  i8,
  <vscale x 16 x i8>,
  iXLen,
  iXLen);

define <vscale x 16 x i16>  @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    i8 %1,
    <vscale x 16 x i8> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
  <vscale x 16 x i16>,
  i8,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    i8 %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 16 x i16> %a
}

947declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
948  <vscale x 32 x i16>,
949  i8,
950  <vscale x 32 x i8>,
951  iXLen,
952  iXLen);
953
954define <vscale x 32 x i16>  @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
955; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8:
956; CHECK:       # %bb.0: # %entry
957; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
958; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
959; CHECK-NEXT:    ret
960entry:
961  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
962    <vscale x 32 x i16> %0,
963    i8 %1,
964    <vscale x 32 x i8> %2,
965    iXLen %3, iXLen 0)
966
967  ret <vscale x 32 x i16> %a
968}
969
970declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
971  <vscale x 32 x i16>,
972  i8,
973  <vscale x 32 x i8>,
974  <vscale x 32 x i1>,
975  iXLen, iXLen);
976
977define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
978; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8:
979; CHECK:       # %bb.0: # %entry
980; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
981; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
982; CHECK-NEXT:    ret
983entry:
984  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
985    <vscale x 32 x i16> %0,
986    i8 %1,
987    <vscale x 32 x i8> %2,
988    <vscale x 32 x i1> %3,
989    iXLen %4, iXLen 0)
990
991  ret <vscale x 32 x i16> %a
992}
993
; Section: vwmaccu.vx with an i16 scalar multiplier widening into i32 result
; vectors, covering LMUL mf4 through m4 at e16. Same pattern as the i8
; section: the "iXLen 0" policy operand selects tail-undisturbed ("tu" in the
; emitted vsetvli), and masked variants additionally expect "mu" and v0.t.

; nxv1i32 result, nxv1i16 source (mf4 at e16), unmasked.
994declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
995  <vscale x 1 x i32>,
996  i16,
997  <vscale x 1 x i16>,
998  iXLen,
999  iXLen);
1000
1001define <vscale x 1 x i32>  @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
1002; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16:
1003; CHECK:       # %bb.0: # %entry
1004; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
1005; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
1006; CHECK-NEXT:    ret
1007entry:
1008  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
1009    <vscale x 1 x i32> %0,
1010    i16 %1,
1011    <vscale x 1 x i16> %2,
1012    iXLen %3, iXLen 0)
1013
1014  ret <vscale x 1 x i32> %a
1015}
1016
; nxv1i32 result, masked by nxv1i1.
1017declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
1018  <vscale x 1 x i32>,
1019  i16,
1020  <vscale x 1 x i16>,
1021  <vscale x 1 x i1>,
1022  iXLen, iXLen);
1023
1024define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1025; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16:
1026; CHECK:       # %bb.0: # %entry
1027; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
1028; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
1029; CHECK-NEXT:    ret
1030entry:
1031  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
1032    <vscale x 1 x i32> %0,
1033    i16 %1,
1034    <vscale x 1 x i16> %2,
1035    <vscale x 1 x i1> %3,
1036    iXLen %4, iXLen 0)
1037
1038  ret <vscale x 1 x i32> %a
1039}
1040
; nxv2i32 result, nxv2i16 source (mf2 at e16), unmasked.
1041declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
1042  <vscale x 2 x i32>,
1043  i16,
1044  <vscale x 2 x i16>,
1045  iXLen,
1046  iXLen);
1047
1048define <vscale x 2 x i32>  @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
1049; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16:
1050; CHECK:       # %bb.0: # %entry
1051; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
1052; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
1053; CHECK-NEXT:    ret
1054entry:
1055  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
1056    <vscale x 2 x i32> %0,
1057    i16 %1,
1058    <vscale x 2 x i16> %2,
1059    iXLen %3, iXLen 0)
1060
1061  ret <vscale x 2 x i32> %a
1062}
1063
; nxv2i32 result, masked by nxv2i1.
1064declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
1065  <vscale x 2 x i32>,
1066  i16,
1067  <vscale x 2 x i16>,
1068  <vscale x 2 x i1>,
1069  iXLen, iXLen);
1070
1071define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1072; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16:
1073; CHECK:       # %bb.0: # %entry
1074; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
1075; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
1076; CHECK-NEXT:    ret
1077entry:
1078  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
1079    <vscale x 2 x i32> %0,
1080    i16 %1,
1081    <vscale x 2 x i16> %2,
1082    <vscale x 2 x i1> %3,
1083    iXLen %4, iXLen 0)
1084
1085  ret <vscale x 2 x i32> %a
1086}
1087
; nxv4i32 result, nxv4i16 source (m1 at e16), unmasked.
1088declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
1089  <vscale x 4 x i32>,
1090  i16,
1091  <vscale x 4 x i16>,
1092  iXLen,
1093  iXLen);
1094
1095define <vscale x 4 x i32>  @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
1096; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16:
1097; CHECK:       # %bb.0: # %entry
1098; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
1099; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
1100; CHECK-NEXT:    ret
1101entry:
1102  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
1103    <vscale x 4 x i32> %0,
1104    i16 %1,
1105    <vscale x 4 x i16> %2,
1106    iXLen %3, iXLen 0)
1107
1108  ret <vscale x 4 x i32> %a
1109}
1110
; nxv4i32 result, masked by nxv4i1.
1111declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
1112  <vscale x 4 x i32>,
1113  i16,
1114  <vscale x 4 x i16>,
1115  <vscale x 4 x i1>,
1116  iXLen, iXLen);
1117
1118define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1119; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16:
1120; CHECK:       # %bb.0: # %entry
1121; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
1122; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
1123; CHECK-NEXT:    ret
1124entry:
1125  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
1126    <vscale x 4 x i32> %0,
1127    i16 %1,
1128    <vscale x 4 x i16> %2,
1129    <vscale x 4 x i1> %3,
1130    iXLen %4, iXLen 0)
1131
1132  ret <vscale x 4 x i32> %a
1133}
1134
; nxv8i32 result, nxv8i16 source (m2 at e16), unmasked.
1135declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
1136  <vscale x 8 x i32>,
1137  i16,
1138  <vscale x 8 x i16>,
1139  iXLen,
1140  iXLen);
1141
1142define <vscale x 8 x i32>  @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
1143; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16:
1144; CHECK:       # %bb.0: # %entry
1145; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
1146; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
1147; CHECK-NEXT:    ret
1148entry:
1149  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
1150    <vscale x 8 x i32> %0,
1151    i16 %1,
1152    <vscale x 8 x i16> %2,
1153    iXLen %3, iXLen 0)
1154
1155  ret <vscale x 8 x i32> %a
1156}
1157
; nxv8i32 result, masked by nxv8i1.
1158declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
1159  <vscale x 8 x i32>,
1160  i16,
1161  <vscale x 8 x i16>,
1162  <vscale x 8 x i1>,
1163  iXLen, iXLen);
1164
1165define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1166; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16:
1167; CHECK:       # %bb.0: # %entry
1168; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
1169; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
1170; CHECK-NEXT:    ret
1171entry:
1172  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
1173    <vscale x 8 x i32> %0,
1174    i16 %1,
1175    <vscale x 8 x i16> %2,
1176    <vscale x 8 x i1> %3,
1177    iXLen %4, iXLen 0)
1178
1179  ret <vscale x 8 x i32> %a
1180}
1181
; nxv16i32 result, nxv16i16 source (m4 at e16), unmasked.
1182declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
1183  <vscale x 16 x i32>,
1184  i16,
1185  <vscale x 16 x i16>,
1186  iXLen,
1187  iXLen);
1188
1189define <vscale x 16 x i32>  @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
1190; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16:
1191; CHECK:       # %bb.0: # %entry
1192; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
1193; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
1194; CHECK-NEXT:    ret
1195entry:
1196  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
1197    <vscale x 16 x i32> %0,
1198    i16 %1,
1199    <vscale x 16 x i16> %2,
1200    iXLen %3, iXLen 0)
1201
1202  ret <vscale x 16 x i32> %a
1203}
1204
; nxv16i32 result, masked by nxv16i1.
1205declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
1206  <vscale x 16 x i32>,
1207  i16,
1208  <vscale x 16 x i16>,
1209  <vscale x 16 x i1>,
1210  iXLen, iXLen);
1211
1212define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1213; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16:
1214; CHECK:       # %bb.0: # %entry
1215; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
1216; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
1217; CHECK-NEXT:    ret
1218entry:
1219  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
1220    <vscale x 16 x i32> %0,
1221    i16 %1,
1222    <vscale x 16 x i16> %2,
1223    <vscale x 16 x i1> %3,
1224    iXLen %4, iXLen 0)
1225
1226  ret <vscale x 16 x i32> %a
1227}
1228
; Section: vwmaccu.vx with an i32 scalar multiplier widening into i64 result
; vectors, covering LMUL mf2 through m4 at e32. Note the scalar operand is
; iXLen-independent i32 here; on RV32 the sed substitution in the RUN lines
; makes iXLen == i32, on RV64 iXLen == i64 (see the RUN lines at the top of
; the file). Policy operand "iXLen 0" again means tail-undisturbed ("tu").

; nxv1i64 result, nxv1i32 source (mf2 at e32), unmasked.
1229declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
1230  <vscale x 1 x i64>,
1231  i32,
1232  <vscale x 1 x i32>,
1233  iXLen,
1234  iXLen);
1235
1236define <vscale x 1 x i64>  @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
1237; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32:
1238; CHECK:       # %bb.0: # %entry
1239; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
1240; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
1241; CHECK-NEXT:    ret
1242entry:
1243  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
1244    <vscale x 1 x i64> %0,
1245    i32 %1,
1246    <vscale x 1 x i32> %2,
1247    iXLen %3, iXLen 0)
1248
1249  ret <vscale x 1 x i64> %a
1250}
1251
; nxv1i64 result, masked by nxv1i1.
1252declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
1253  <vscale x 1 x i64>,
1254  i32,
1255  <vscale x 1 x i32>,
1256  <vscale x 1 x i1>,
1257  iXLen, iXLen);
1258
1259define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1260; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32:
1261; CHECK:       # %bb.0: # %entry
1262; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
1263; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
1264; CHECK-NEXT:    ret
1265entry:
1266  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
1267    <vscale x 1 x i64> %0,
1268    i32 %1,
1269    <vscale x 1 x i32> %2,
1270    <vscale x 1 x i1> %3,
1271    iXLen %4, iXLen 0)
1272
1273  ret <vscale x 1 x i64> %a
1274}
1275
; nxv2i64 result, nxv2i32 source (m1 at e32), unmasked.
1276declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
1277  <vscale x 2 x i64>,
1278  i32,
1279  <vscale x 2 x i32>,
1280  iXLen,
1281  iXLen);
1282
1283define <vscale x 2 x i64>  @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
1284; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32:
1285; CHECK:       # %bb.0: # %entry
1286; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
1287; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
1288; CHECK-NEXT:    ret
1289entry:
1290  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
1291    <vscale x 2 x i64> %0,
1292    i32 %1,
1293    <vscale x 2 x i32> %2,
1294    iXLen %3, iXLen 0)
1295
1296  ret <vscale x 2 x i64> %a
1297}
1298
; nxv2i64 result, masked by nxv2i1.
1299declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
1300  <vscale x 2 x i64>,
1301  i32,
1302  <vscale x 2 x i32>,
1303  <vscale x 2 x i1>,
1304  iXLen, iXLen);
1305
1306define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1307; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32:
1308; CHECK:       # %bb.0: # %entry
1309; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
1310; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
1311; CHECK-NEXT:    ret
1312entry:
1313  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
1314    <vscale x 2 x i64> %0,
1315    i32 %1,
1316    <vscale x 2 x i32> %2,
1317    <vscale x 2 x i1> %3,
1318    iXLen %4, iXLen 0)
1319
1320  ret <vscale x 2 x i64> %a
1321}
1322
; nxv4i64 result, nxv4i32 source (m2 at e32), unmasked.
1323declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
1324  <vscale x 4 x i64>,
1325  i32,
1326  <vscale x 4 x i32>,
1327  iXLen,
1328  iXLen);
1329
1330define <vscale x 4 x i64>  @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
1331; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32:
1332; CHECK:       # %bb.0: # %entry
1333; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
1334; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
1335; CHECK-NEXT:    ret
1336entry:
1337  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
1338    <vscale x 4 x i64> %0,
1339    i32 %1,
1340    <vscale x 4 x i32> %2,
1341    iXLen %3, iXLen 0)
1342
1343  ret <vscale x 4 x i64> %a
1344}
1345
; nxv4i64 result, masked by nxv4i1.
1346declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
1347  <vscale x 4 x i64>,
1348  i32,
1349  <vscale x 4 x i32>,
1350  <vscale x 4 x i1>,
1351  iXLen, iXLen);
1352
1353define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1354; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32:
1355; CHECK:       # %bb.0: # %entry
1356; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
1357; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
1358; CHECK-NEXT:    ret
1359entry:
1360  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
1361    <vscale x 4 x i64> %0,
1362    i32 %1,
1363    <vscale x 4 x i32> %2,
1364    <vscale x 4 x i1> %3,
1365    iXLen %4, iXLen 0)
1366
1367  ret <vscale x 4 x i64> %a
1368}
1369
; nxv8i64 result, nxv8i32 source (m4 at e32), unmasked.
1370declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
1371  <vscale x 8 x i64>,
1372  i32,
1373  <vscale x 8 x i32>,
1374  iXLen,
1375  iXLen);
1376
1377define <vscale x 8 x i64>  @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
1378; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32:
1379; CHECK:       # %bb.0: # %entry
1380; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
1381; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
1382; CHECK-NEXT:    ret
1383entry:
1384  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
1385    <vscale x 8 x i64> %0,
1386    i32 %1,
1387    <vscale x 8 x i32> %2,
1388    iXLen %3, iXLen 0)
1389
1390  ret <vscale x 8 x i64> %a
1391}
1392
; nxv8i64 result, masked by nxv8i1.
1393declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
1394  <vscale x 8 x i64>,
1395  i32,
1396  <vscale x 8 x i32>,
1397  <vscale x 8 x i1>,
1398  iXLen, iXLen);
1399
1400define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1401; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32:
1402; CHECK:       # %bb.0: # %entry
1403; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
1404; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
1405; CHECK-NEXT:    ret
1406entry:
1407  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
1408    <vscale x 8 x i64> %0,
1409    i32 %1,
1410    <vscale x 8 x i32> %2,
1411    <vscale x 8 x i1> %3,
1412    iXLen %4, iXLen 0)
1413
1414  ret <vscale x 8 x i64> %a
1415}
1416