; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs -early-live-intervals | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs -early-live-intervals | FileCheck %s
10
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
293
declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
529
declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
718
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i8,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i8,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i8,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i8,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i8,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i8,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i8,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i8,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i8,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i8,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i8,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);
983
984define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
985; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8:
986; CHECK:       # %bb.0: # %entry
987; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
988; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
989; CHECK-NEXT:    ret
990entry:
991  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
992    <vscale x 32 x i16> %0,
993    <vscale x 32 x i16> %1,
994    i8 %2,
995    <vscale x 32 x i1> %3,
996    iXLen %4, iXLen 1)
997
998  ret <vscale x 32 x i16> %a
999}
1000
1001declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
1002  <vscale x 1 x i32>,
1003  <vscale x 1 x i32>,
1004  i16,
1005  iXLen);
1006
1007define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, iXLen %2) nounwind {
1008; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16:
1009; CHECK:       # %bb.0: # %entry
1010; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1011; CHECK-NEXT:    vwadd.wx v8, v8, a0
1012; CHECK-NEXT:    ret
1013entry:
1014  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
1015    <vscale x 1 x i32> undef,
1016    <vscale x 1 x i32> %0,
1017    i16 %1,
1018    iXLen %2)
1019
1020  ret <vscale x 1 x i32> %a
1021}
1022
1023declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
1024  <vscale x 1 x i32>,
1025  <vscale x 1 x i32>,
1026  i16,
1027  <vscale x 1 x i1>,
1028  iXLen,
1029  iXLen);
1030
1031define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1032; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16:
1033; CHECK:       # %bb.0: # %entry
1034; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1035; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
1036; CHECK-NEXT:    ret
1037entry:
1038  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
1039    <vscale x 1 x i32> %0,
1040    <vscale x 1 x i32> %1,
1041    i16 %2,
1042    <vscale x 1 x i1> %3,
1043    iXLen %4, iXLen 1)
1044
1045  ret <vscale x 1 x i32> %a
1046}
1047
1048declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
1049  <vscale x 2 x i32>,
1050  <vscale x 2 x i32>,
1051  i16,
1052  iXLen);
1053
1054define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, iXLen %2) nounwind {
1055; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16:
1056; CHECK:       # %bb.0: # %entry
1057; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1058; CHECK-NEXT:    vwadd.wx v8, v8, a0
1059; CHECK-NEXT:    ret
1060entry:
1061  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
1062    <vscale x 2 x i32> undef,
1063    <vscale x 2 x i32> %0,
1064    i16 %1,
1065    iXLen %2)
1066
1067  ret <vscale x 2 x i32> %a
1068}
1069
1070declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
1071  <vscale x 2 x i32>,
1072  <vscale x 2 x i32>,
1073  i16,
1074  <vscale x 2 x i1>,
1075  iXLen,
1076  iXLen);
1077
1078define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1079; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16:
1080; CHECK:       # %bb.0: # %entry
1081; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1082; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
1083; CHECK-NEXT:    ret
1084entry:
1085  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
1086    <vscale x 2 x i32> %0,
1087    <vscale x 2 x i32> %1,
1088    i16 %2,
1089    <vscale x 2 x i1> %3,
1090    iXLen %4, iXLen 1)
1091
1092  ret <vscale x 2 x i32> %a
1093}
1094
1095declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
1096  <vscale x 4 x i32>,
1097  <vscale x 4 x i32>,
1098  i16,
1099  iXLen);
1100
1101define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, iXLen %2) nounwind {
1102; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16:
1103; CHECK:       # %bb.0: # %entry
1104; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1105; CHECK-NEXT:    vwadd.wx v8, v8, a0
1106; CHECK-NEXT:    ret
1107entry:
1108  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
1109    <vscale x 4 x i32> undef,
1110    <vscale x 4 x i32> %0,
1111    i16 %1,
1112    iXLen %2)
1113
1114  ret <vscale x 4 x i32> %a
1115}
1116
1117declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
1118  <vscale x 4 x i32>,
1119  <vscale x 4 x i32>,
1120  i16,
1121  <vscale x 4 x i1>,
1122  iXLen,
1123  iXLen);
1124
1125define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1126; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16:
1127; CHECK:       # %bb.0: # %entry
1128; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1129; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
1130; CHECK-NEXT:    ret
1131entry:
1132  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
1133    <vscale x 4 x i32> %0,
1134    <vscale x 4 x i32> %1,
1135    i16 %2,
1136    <vscale x 4 x i1> %3,
1137    iXLen %4, iXLen 1)
1138
1139  ret <vscale x 4 x i32> %a
1140}
1141
1142declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
1143  <vscale x 8 x i32>,
1144  <vscale x 8 x i32>,
1145  i16,
1146  iXLen);
1147
1148define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, iXLen %2) nounwind {
1149; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16:
1150; CHECK:       # %bb.0: # %entry
1151; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1152; CHECK-NEXT:    vwadd.wx v8, v8, a0
1153; CHECK-NEXT:    ret
1154entry:
1155  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
1156    <vscale x 8 x i32> undef,
1157    <vscale x 8 x i32> %0,
1158    i16 %1,
1159    iXLen %2)
1160
1161  ret <vscale x 8 x i32> %a
1162}
1163
1164declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
1165  <vscale x 8 x i32>,
1166  <vscale x 8 x i32>,
1167  i16,
1168  <vscale x 8 x i1>,
1169  iXLen,
1170  iXLen);
1171
1172define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1173; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16:
1174; CHECK:       # %bb.0: # %entry
1175; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1176; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
1177; CHECK-NEXT:    ret
1178entry:
1179  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
1180    <vscale x 8 x i32> %0,
1181    <vscale x 8 x i32> %1,
1182    i16 %2,
1183    <vscale x 8 x i1> %3,
1184    iXLen %4, iXLen 1)
1185
1186  ret <vscale x 8 x i32> %a
1187}
1188
1189declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
1190  <vscale x 16 x i32>,
1191  <vscale x 16 x i32>,
1192  i16,
1193  iXLen);
1194
1195define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, iXLen %2) nounwind {
1196; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16:
1197; CHECK:       # %bb.0: # %entry
1198; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1199; CHECK-NEXT:    vwadd.wx v8, v8, a0
1200; CHECK-NEXT:    ret
1201entry:
1202  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
1203    <vscale x 16 x i32> undef,
1204    <vscale x 16 x i32> %0,
1205    i16 %1,
1206    iXLen %2)
1207
1208  ret <vscale x 16 x i32> %a
1209}
1210
1211declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
1212  <vscale x 16 x i32>,
1213  <vscale x 16 x i32>,
1214  i16,
1215  <vscale x 16 x i1>,
1216  iXLen,
1217  iXLen);
1218
1219define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1220; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16:
1221; CHECK:       # %bb.0: # %entry
1222; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1223; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
1224; CHECK-NEXT:    ret
1225entry:
1226  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
1227    <vscale x 16 x i32> %0,
1228    <vscale x 16 x i32> %1,
1229    i16 %2,
1230    <vscale x 16 x i1> %3,
1231    iXLen %4, iXLen 1)
1232
1233  ret <vscale x 16 x i32> %a
1234}
1235
1236declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
1237  <vscale x 1 x i64>,
1238  <vscale x 1 x i64>,
1239  i32,
1240  iXLen);
1241
1242define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
1243; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32:
1244; CHECK:       # %bb.0: # %entry
1245; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1246; CHECK-NEXT:    vwadd.wx v8, v8, a0
1247; CHECK-NEXT:    ret
1248entry:
1249  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
1250    <vscale x 1 x i64> undef,
1251    <vscale x 1 x i64> %0,
1252    i32 %1,
1253    iXLen %2)
1254
1255  ret <vscale x 1 x i64> %a
1256}
1257
1258declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
1259  <vscale x 1 x i64>,
1260  <vscale x 1 x i64>,
1261  i32,
1262  <vscale x 1 x i1>,
1263  iXLen,
1264  iXLen);
1265
1266define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1267; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32:
1268; CHECK:       # %bb.0: # %entry
1269; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
1270; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
1271; CHECK-NEXT:    ret
1272entry:
1273  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
1274    <vscale x 1 x i64> %0,
1275    <vscale x 1 x i64> %1,
1276    i32 %2,
1277    <vscale x 1 x i1> %3,
1278    iXLen %4, iXLen 1)
1279
1280  ret <vscale x 1 x i64> %a
1281}
1282
1283declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
1284  <vscale x 2 x i64>,
1285  <vscale x 2 x i64>,
1286  i32,
1287  iXLen);
1288
1289define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, iXLen %2) nounwind {
1290; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32:
1291; CHECK:       # %bb.0: # %entry
1292; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1293; CHECK-NEXT:    vwadd.wx v8, v8, a0
1294; CHECK-NEXT:    ret
1295entry:
1296  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
1297    <vscale x 2 x i64> undef,
1298    <vscale x 2 x i64> %0,
1299    i32 %1,
1300    iXLen %2)
1301
1302  ret <vscale x 2 x i64> %a
1303}
1304
1305declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
1306  <vscale x 2 x i64>,
1307  <vscale x 2 x i64>,
1308  i32,
1309  <vscale x 2 x i1>,
1310  iXLen,
1311  iXLen);
1312
1313define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1314; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32:
1315; CHECK:       # %bb.0: # %entry
1316; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
1317; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
1318; CHECK-NEXT:    ret
1319entry:
1320  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
1321    <vscale x 2 x i64> %0,
1322    <vscale x 2 x i64> %1,
1323    i32 %2,
1324    <vscale x 2 x i1> %3,
1325    iXLen %4, iXLen 1)
1326
1327  ret <vscale x 2 x i64> %a
1328}
1329
1330declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
1331  <vscale x 4 x i64>,
1332  <vscale x 4 x i64>,
1333  i32,
1334  iXLen);
1335
1336define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, iXLen %2) nounwind {
1337; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32:
1338; CHECK:       # %bb.0: # %entry
1339; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1340; CHECK-NEXT:    vwadd.wx v8, v8, a0
1341; CHECK-NEXT:    ret
1342entry:
1343  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
1344    <vscale x 4 x i64> undef,
1345    <vscale x 4 x i64> %0,
1346    i32 %1,
1347    iXLen %2)
1348
1349  ret <vscale x 4 x i64> %a
1350}
1351
1352declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
1353  <vscale x 4 x i64>,
1354  <vscale x 4 x i64>,
1355  i32,
1356  <vscale x 4 x i1>,
1357  iXLen,
1358  iXLen);
1359
1360define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1361; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32:
1362; CHECK:       # %bb.0: # %entry
1363; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
1364; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
1365; CHECK-NEXT:    ret
1366entry:
1367  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
1368    <vscale x 4 x i64> %0,
1369    <vscale x 4 x i64> %1,
1370    i32 %2,
1371    <vscale x 4 x i1> %3,
1372    iXLen %4, iXLen 1)
1373
1374  ret <vscale x 4 x i64> %a
1375}
1376
1377declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
1378  <vscale x 8 x i64>,
1379  <vscale x 8 x i64>,
1380  i32,
1381  iXLen);
1382
1383define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, iXLen %2) nounwind {
1384; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32:
1385; CHECK:       # %bb.0: # %entry
1386; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
1387; CHECK-NEXT:    vwadd.wx v8, v8, a0
1388; CHECK-NEXT:    ret
1389entry:
1390  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
1391    <vscale x 8 x i64> undef,
1392    <vscale x 8 x i64> %0,
1393    i32 %1,
1394    iXLen %2)
1395
1396  ret <vscale x 8 x i64> %a
1397}
1398
1399declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
1400  <vscale x 8 x i64>,
1401  <vscale x 8 x i64>,
1402  i32,
1403  <vscale x 8 x i1>,
1404  iXLen,
1405  iXLen);
1406
1407define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1408; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32:
1409; CHECK:       # %bb.0: # %entry
1410; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
1411; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
1412; CHECK-NEXT:    ret
1413entry:
1414  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
1415    <vscale x 8 x i64> %0,
1416    <vscale x 8 x i64> %1,
1417    i32 %2,
1418    <vscale x 8 x i1> %3,
1419    iXLen %4, iXLen 1)
1420
1421  ret <vscale x 8 x i64> %a
1422}
1423
1424define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1425; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
1426; CHECK:       # %bb.0: # %entry
1427; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
1428; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
1429; CHECK-NEXT:    ret
1430entry:
1431  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
1432    <vscale x 1 x i16> %0,
1433    <vscale x 1 x i16> %0,
1434    <vscale x 1 x i8> %1,
1435    <vscale x 1 x i1> %2,
1436    iXLen %3, iXLen 1)
1437
1438  ret <vscale x 1 x i16> %a
1439}
1440
1441define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1442; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
1443; CHECK:       # %bb.0: # %entry
1444; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
1445; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
1446; CHECK-NEXT:    ret
1447entry:
1448  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
1449    <vscale x 2 x i16> %0,
1450    <vscale x 2 x i16> %0,
1451    <vscale x 2 x i8> %1,
1452    <vscale x 2 x i1> %2,
1453    iXLen %3, iXLen 1)
1454
1455  ret <vscale x 2 x i16> %a
1456}
1457
1458define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1459; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
1460; CHECK:       # %bb.0: # %entry
1461; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
1462; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
1463; CHECK-NEXT:    ret
1464entry:
1465  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
1466    <vscale x 4 x i16> %0,
1467    <vscale x 4 x i16> %0,
1468    <vscale x 4 x i8> %1,
1469    <vscale x 4 x i1> %2,
1470    iXLen %3, iXLen 1)
1471
1472  ret <vscale x 4 x i16> %a
1473}
1474
1475define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1476; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
1477; CHECK:       # %bb.0: # %entry
1478; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
1479; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
1480; CHECK-NEXT:    ret
1481entry:
1482  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
1483    <vscale x 8 x i16> %0,
1484    <vscale x 8 x i16> %0,
1485    <vscale x 8 x i8> %1,
1486    <vscale x 8 x i1> %2,
1487    iXLen %3, iXLen 1)
1488
1489  ret <vscale x 8 x i16> %a
1490}
1491
1492define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
1493; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
1494; CHECK:       # %bb.0: # %entry
1495; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
1496; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
1497; CHECK-NEXT:    ret
1498entry:
1499  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
1500    <vscale x 16 x i16> %0,
1501    <vscale x 16 x i16> %0,
1502    <vscale x 16 x i8> %1,
1503    <vscale x 16 x i1> %2,
1504    iXLen %3, iXLen 1)
1505
1506  ret <vscale x 16 x i16> %a
1507}
1508
1509define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
1510; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
1511; CHECK:       # %bb.0: # %entry
1512; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
1513; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
1514; CHECK-NEXT:    ret
1515entry:
1516  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
1517    <vscale x 32 x i16> %0,
1518    <vscale x 32 x i16> %0,
1519    <vscale x 32 x i8> %1,
1520    <vscale x 32 x i1> %2,
1521    iXLen %3, iXLen 1)
1522
1523  ret <vscale x 32 x i16> %a
1524}
1525
1526define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1527; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
1528; CHECK:       # %bb.0: # %entry
1529; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
1530; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
1531; CHECK-NEXT:    ret
1532entry:
1533  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
1534    <vscale x 1 x i32> %0,
1535    <vscale x 1 x i32> %0,
1536    <vscale x 1 x i16> %1,
1537    <vscale x 1 x i1> %2,
1538    iXLen %3, iXLen 1)
1539
1540  ret <vscale x 1 x i32> %a
1541}
1542
1543define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1544; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
1545; CHECK:       # %bb.0: # %entry
1546; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
1547; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
1548; CHECK-NEXT:    ret
1549entry:
1550  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
1551    <vscale x 2 x i32> %0,
1552    <vscale x 2 x i32> %0,
1553    <vscale x 2 x i16> %1,
1554    <vscale x 2 x i1> %2,
1555    iXLen %3, iXLen 1)
1556
1557  ret <vscale x 2 x i32> %a
1558}
1559
1560define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1561; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
1562; CHECK:       # %bb.0: # %entry
1563; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
1564; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
1565; CHECK-NEXT:    ret
1566entry:
1567  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
1568    <vscale x 4 x i32> %0,
1569    <vscale x 4 x i32> %0,
1570    <vscale x 4 x i16> %1,
1571    <vscale x 4 x i1> %2,
1572    iXLen %3, iXLen 1)
1573
1574  ret <vscale x 4 x i32> %a
1575}
1576
1577define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1578; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
1579; CHECK:       # %bb.0: # %entry
1580; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
1581; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
1582; CHECK-NEXT:    ret
1583entry:
1584  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
1585    <vscale x 8 x i32> %0,
1586    <vscale x 8 x i32> %0,
1587    <vscale x 8 x i16> %1,
1588    <vscale x 8 x i1> %2,
1589    iXLen %3, iXLen 1)
1590
1591  ret <vscale x 8 x i32> %a
1592}
1593
1594define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
1595; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
1596; CHECK:       # %bb.0: # %entry
1597; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
1598; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
1599; CHECK-NEXT:    ret
1600entry:
1601  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
1602    <vscale x 16 x i32> %0,
1603    <vscale x 16 x i32> %0,
1604    <vscale x 16 x i16> %1,
1605    <vscale x 16 x i1> %2,
1606    iXLen %3, iXLen 1)
1607
1608  ret <vscale x 16 x i32> %a
1609}
1610
1611define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1612; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
1613; CHECK:       # %bb.0: # %entry
1614; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
1615; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
1616; CHECK-NEXT:    ret
1617entry:
1618  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
1619    <vscale x 1 x i64> %0,
1620    <vscale x 1 x i64> %0,
1621    <vscale x 1 x i32> %1,
1622    <vscale x 1 x i1> %2,
1623    iXLen %3, iXLen 1)
1624
1625  ret <vscale x 1 x i64> %a
1626}
1627
1628define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1629; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
1630; CHECK:       # %bb.0: # %entry
1631; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
1632; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
1633; CHECK-NEXT:    ret
1634entry:
1635  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
1636    <vscale x 2 x i64> %0,
1637    <vscale x 2 x i64> %0,
1638    <vscale x 2 x i32> %1,
1639    <vscale x 2 x i1> %2,
1640    iXLen %3, iXLen 1)
1641
1642  ret <vscale x 2 x i64> %a
1643}
1644
1645define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1646; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
1647; CHECK:       # %bb.0: # %entry
1648; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
1649; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
1650; CHECK-NEXT:    ret
1651entry:
1652  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
1653    <vscale x 4 x i64> %0,
1654    <vscale x 4 x i64> %0,
1655    <vscale x 4 x i32> %1,
1656    <vscale x 4 x i1> %2,
1657    iXLen %3, iXLen 1)
1658
1659  ret <vscale x 4 x i64> %a
1660}
1661
1662define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1663; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
1664; CHECK:       # %bb.0: # %entry
1665; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
1666; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
1667; CHECK-NEXT:    ret
1668entry:
1669  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
1670    <vscale x 8 x i64> %0,
1671    <vscale x 8 x i64> %0,
1672    <vscale x 8 x i32> %1,
1673    <vscale x 8 x i1> %2,
1674    iXLen %3, iXLen 1)
1675
1676  ret <vscale x 8 x i64> %a
1677}
1678
1679define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1680; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
1681; CHECK:       # %bb.0: # %entry
1682; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
1683; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1684; CHECK-NEXT:    ret
1685entry:
1686  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
1687    <vscale x 1 x i16> %0,
1688    <vscale x 1 x i16> %0,
1689    i8 %1,
1690    <vscale x 1 x i1> %2,
1691    iXLen %3, iXLen 1)
1692
1693  ret <vscale x 1 x i16> %a
1694}
1695
1696define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1697; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
1698; CHECK:       # %bb.0: # %entry
1699; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
1700; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1701; CHECK-NEXT:    ret
1702entry:
1703  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
1704    <vscale x 2 x i16> %0,
1705    <vscale x 2 x i16> %0,
1706    i8 %1,
1707    <vscale x 2 x i1> %2,
1708    iXLen %3, iXLen 1)
1709
1710  ret <vscale x 2 x i16> %a
1711}
1712
1713define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1714; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
1715; CHECK:       # %bb.0: # %entry
1716; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
1717; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1718; CHECK-NEXT:    ret
1719entry:
1720  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
1721    <vscale x 4 x i16> %0,
1722    <vscale x 4 x i16> %0,
1723    i8 %1,
1724    <vscale x 4 x i1> %2,
1725    iXLen %3, iXLen 1)
1726
1727  ret <vscale x 4 x i16> %a
1728}
1729
1730define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1731; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
1732; CHECK:       # %bb.0: # %entry
1733; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
1734; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1735; CHECK-NEXT:    ret
1736entry:
1737  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
1738    <vscale x 8 x i16> %0,
1739    <vscale x 8 x i16> %0,
1740    i8 %1,
1741    <vscale x 8 x i1> %2,
1742    iXLen %3, iXLen 1)
1743
1744  ret <vscale x 8 x i16> %a
1745}
1746
; Tied-operand masked vwadd.wx tests: the maskedoff operand and the wide
; vector source are the same value (%0), so the destination may overlap the
; source register group and each test lowers to an in-place
; "vwadd.wx v8, v8, a0, v0.t" with no whole-register move.  The trailing
; "iXLen 1" policy operand selects tail-agnostic lowering ("ta" in the
; generated vsetvli); the mask policy stays mask-undisturbed ("mu").
define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %0,
    i8 %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %0,
    i8 %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %0,
    i16 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %0,
    i16 %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %0,
    i16 %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %0,
    i16 %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %0,
    i16 %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %0,
    i32 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %0,
    i32 %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %0,
    i32 %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %0,
    i32 %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}
1933
; Untied vwadd.wv tests with the IR arguments swapped relative to the
; intrinsic's operand order: the narrow (SEW) source arrives first in v8 and
; the wide (2*SEW) source in the following register group.  Because a
; widening op's destination group may not partially overlap its narrow
; source, codegen writes the result into a fresh register group and then
; copies it back to v8 with a whole-register move (vmv1r/vmv2r/vmv4r/vmv8r
; matching the widened LMUL).  The passthru is undef, so no merge occurs.
define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %0,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %0,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %0,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %0,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %0,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vwadd.wv v24, v16, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %0,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %0,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %0,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %0,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %0,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %0,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %0,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %0,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vwadd.wv v24, v16, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %0,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}
2171