; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vadd.ll (revision 2967e5f8007d873a3e9d97870d2461d0827a3976)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

; vadd.vv with <vscale x 1 x i8> operands (e8, mf8): unmasked and masked forms.
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

; vadd.vv with <vscale x 2 x i8> operands (e8, mf4): unmasked and masked forms.
declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

; vadd.vv with <vscale x 4 x i8> operands (e8, mf2): unmasked and masked forms.
declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

; vadd.vv with <vscale x 8 x i8> operands (e8, m1): unmasked and masked forms.
declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

; vadd.vv with <vscale x 16 x i8> operands (e8, m2): unmasked and masked forms.
declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

; vadd.vv with <vscale x 32 x i8> operands (e8, m4): unmasked and masked forms.
declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

; vadd.vv with <vscale x 64 x i8> operands (e8, m8): unmasked and masked forms.
; The masked form spills the third vector operand to a stack-passed pointer,
; hence the vl8r.v reload in the expected output.
declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

; vadd.vv with <vscale x 1 x i16> operands (e16, mf4): unmasked and masked forms.
declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

; vadd.vv with <vscale x 2 x i16> operands (e16, mf2): unmasked and masked forms.
declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

; vadd.vv with <vscale x 4 x i16> operands (e16, m1): unmasked and masked forms.
declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

; vadd.vv with <vscale x 8 x i16> operands (e16, m2): unmasked and masked forms.
declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

; vadd.vv with <vscale x 16 x i16> operands (e16, m4): unmasked and masked forms.
declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

; vadd.vv with <vscale x 32 x i16> operands (e16, m8): unmasked and masked forms.
; The masked form reloads the stack-passed third operand with vl8re16.v.
declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

; vadd.vv with <vscale x 1 x i32> operands (e32, mf2): unmasked and masked forms.
declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

; vadd.vv with <vscale x 2 x i32> operands (e32, m1): unmasked and masked forms.
declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

; vadd.vv with <vscale x 4 x i32> operands (e32, m2): unmasked and masked forms.
declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

; vadd.vv with <vscale x 8 x i32> operands (e32, m4): unmasked and masked forms.
declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

; vadd.vv with <vscale x 16 x i32> operands (e32, m8): unmasked and masked forms.
; The masked form reloads the stack-passed third operand with vl8re32.v.
declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

; vadd.vv with <vscale x 1 x i64> operands (e64, m1): unmasked and masked forms.
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

884declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
885  <vscale x 2 x i64>,
886  <vscale x 2 x i64>,
887  <vscale x 2 x i64>,
888  iXLen);
889
890define <vscale x 2 x i64> @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
891; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64:
892; CHECK:       # %bb.0: # %entry
893; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
894; CHECK-NEXT:    vadd.vv v8, v8, v10
895; CHECK-NEXT:    ret
896entry:
897  %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
898    <vscale x 2 x i64> undef,
899    <vscale x 2 x i64> %0,
900    <vscale x 2 x i64> %1,
901    iXLen %2)
902
903  ret <vscale x 2 x i64> %a
904}
905
906declare <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
907  <vscale x 2 x i64>,
908  <vscale x 2 x i64>,
909  <vscale x 2 x i64>,
910  <vscale x 2 x i1>,
911  iXLen, iXLen);
912
; Masked vadd.vv at e64/m2; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
913define <vscale x 2 x i64> @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
914; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
915; CHECK:       # %bb.0: # %entry
916; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
917; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
918; CHECK-NEXT:    ret
919entry:
920  %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
921    <vscale x 2 x i64> %0,
922    <vscale x 2 x i64> %1,
923    <vscale x 2 x i64> %2,
924    <vscale x 2 x i1> %3,
925    iXLen %4, iXLen 1)
926
927  ret <vscale x 2 x i64> %a
928}
929
930declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
931  <vscale x 4 x i64>,
932  <vscale x 4 x i64>,
933  <vscale x 4 x i64>,
934  iXLen);
935
; Unmasked vadd.vv at e64/m4; undef passthru lets the destination reuse v8.
936define <vscale x 4 x i64> @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
937; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64:
938; CHECK:       # %bb.0: # %entry
939; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
940; CHECK-NEXT:    vadd.vv v8, v8, v12
941; CHECK-NEXT:    ret
942entry:
943  %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
944    <vscale x 4 x i64> undef,
945    <vscale x 4 x i64> %0,
946    <vscale x 4 x i64> %1,
947    iXLen %2)
948
949  ret <vscale x 4 x i64> %a
950}
951
952declare <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
953  <vscale x 4 x i64>,
954  <vscale x 4 x i64>,
955  <vscale x 4 x i64>,
956  <vscale x 4 x i1>,
957  iXLen, iXLen);
958
; Masked vadd.vv at e64/m4; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
959define <vscale x 4 x i64> @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
960; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
961; CHECK:       # %bb.0: # %entry
962; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
963; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
964; CHECK-NEXT:    ret
965entry:
966  %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
967    <vscale x 4 x i64> %0,
968    <vscale x 4 x i64> %1,
969    <vscale x 4 x i64> %2,
970    <vscale x 4 x i1> %3,
971    iXLen %4, iXLen 1)
972
973  ret <vscale x 4 x i64> %a
974}
975
976declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
977  <vscale x 8 x i64>,
978  <vscale x 8 x i64>,
979  <vscale x 8 x i64>,
980  iXLen);
981
; Unmasked vadd.vv at e64/m8; undef passthru lets the destination reuse v8.
982define <vscale x 8 x i64> @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
983; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64:
984; CHECK:       # %bb.0: # %entry
985; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
986; CHECK-NEXT:    vadd.vv v8, v8, v16
987; CHECK-NEXT:    ret
988entry:
989  %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
990    <vscale x 8 x i64> undef,
991    <vscale x 8 x i64> %0,
992    <vscale x 8 x i64> %1,
993    iXLen %2)
994
995  ret <vscale x 8 x i64> %a
996}
997
998declare <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
999  <vscale x 8 x i64>,
1000  <vscale x 8 x i64>,
1001  <vscale x 8 x i64>,
1002  <vscale x 8 x i1>,
1003  iXLen, iXLen);
1004
; Masked vadd.vv at e64/m8: three m8 vector args exceed the register file, so
; the third arrives by reference and is reloaded with vl8re64.v; the AVL is
; therefore in a1 rather than a0. Policy "iXLen 1" matches "ta, mu".
1005define <vscale x 8 x i64> @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1006; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1007; CHECK:       # %bb.0: # %entry
1008; CHECK-NEXT:    vl8re64.v v24, (a0)
1009; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
1010; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
1011; CHECK-NEXT:    ret
1012entry:
1013  %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
1014    <vscale x 8 x i64> %0,
1015    <vscale x 8 x i64> %1,
1016    <vscale x 8 x i64> %2,
1017    <vscale x 8 x i1> %3,
1018    iXLen %4, iXLen 1)
1019
1020  ret <vscale x 8 x i64> %a
1021}
1022
1023declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
1024  <vscale x 1 x i8>,
1025  <vscale x 1 x i8>,
1026  i8,
1027  iXLen);
1028
; Unmasked vector-scalar vadd.vx at e8/mf8; undef passthru, destination reuses v8.
1029define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
1030; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8:
1031; CHECK:       # %bb.0: # %entry
1032; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
1033; CHECK-NEXT:    vadd.vx v8, v8, a0
1034; CHECK-NEXT:    ret
1035entry:
1036  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
1037    <vscale x 1 x i8> undef,
1038    <vscale x 1 x i8> %0,
1039    i8 %1,
1040    iXLen %2)
1041
1042  ret <vscale x 1 x i8> %a
1043}
1044
1045declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
1046  <vscale x 1 x i8>,
1047  <vscale x 1 x i8>,
1048  i8,
1049  <vscale x 1 x i1>,
1050  iXLen, iXLen);
1051
; Masked vadd.vx at e8/mf8; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1052define <vscale x 1 x i8> @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1053; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8:
1054; CHECK:       # %bb.0: # %entry
1055; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
1056; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
1057; CHECK-NEXT:    ret
1058entry:
1059  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
1060    <vscale x 1 x i8> %0,
1061    <vscale x 1 x i8> %1,
1062    i8 %2,
1063    <vscale x 1 x i1> %3,
1064    iXLen %4, iXLen 1)
1065
1066  ret <vscale x 1 x i8> %a
1067}
1068
1069declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
1070  <vscale x 2 x i8>,
1071  <vscale x 2 x i8>,
1072  i8,
1073  iXLen);
1074
; Unmasked vector-scalar vadd.vx at e8/mf4; undef passthru, destination reuses v8.
1075define <vscale x 2 x i8> @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
1076; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8:
1077; CHECK:       # %bb.0: # %entry
1078; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
1079; CHECK-NEXT:    vadd.vx v8, v8, a0
1080; CHECK-NEXT:    ret
1081entry:
1082  %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
1083    <vscale x 2 x i8> undef,
1084    <vscale x 2 x i8> %0,
1085    i8 %1,
1086    iXLen %2)
1087
1088  ret <vscale x 2 x i8> %a
1089}
1090
1091declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
1092  <vscale x 2 x i8>,
1093  <vscale x 2 x i8>,
1094  i8,
1095  <vscale x 2 x i1>,
1096  iXLen, iXLen);
1097
; Masked vadd.vx at e8/mf4; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1098define <vscale x 2 x i8> @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1099; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8:
1100; CHECK:       # %bb.0: # %entry
1101; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
1102; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
1103; CHECK-NEXT:    ret
1104entry:
1105  %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
1106    <vscale x 2 x i8> %0,
1107    <vscale x 2 x i8> %1,
1108    i8 %2,
1109    <vscale x 2 x i1> %3,
1110    iXLen %4, iXLen 1)
1111
1112  ret <vscale x 2 x i8> %a
1113}
1114
1115declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
1116  <vscale x 4 x i8>,
1117  <vscale x 4 x i8>,
1118  i8,
1119  iXLen);
1120
; Unmasked vector-scalar vadd.vx at e8/mf2; undef passthru, destination reuses v8.
1121define <vscale x 4 x i8> @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1122; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8:
1123; CHECK:       # %bb.0: # %entry
1124; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
1125; CHECK-NEXT:    vadd.vx v8, v8, a0
1126; CHECK-NEXT:    ret
1127entry:
1128  %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
1129    <vscale x 4 x i8> undef,
1130    <vscale x 4 x i8> %0,
1131    i8 %1,
1132    iXLen %2)
1133
1134  ret <vscale x 4 x i8> %a
1135}
1136
1137declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
1138  <vscale x 4 x i8>,
1139  <vscale x 4 x i8>,
1140  i8,
1141  <vscale x 4 x i1>,
1142  iXLen, iXLen);
1143
; Masked vadd.vx at e8/mf2; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1144define <vscale x 4 x i8> @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1145; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8:
1146; CHECK:       # %bb.0: # %entry
1147; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
1148; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
1149; CHECK-NEXT:    ret
1150entry:
1151  %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
1152    <vscale x 4 x i8> %0,
1153    <vscale x 4 x i8> %1,
1154    i8 %2,
1155    <vscale x 4 x i1> %3,
1156    iXLen %4, iXLen 1)
1157
1158  ret <vscale x 4 x i8> %a
1159}
1160
1161declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
1162  <vscale x 8 x i8>,
1163  <vscale x 8 x i8>,
1164  i8,
1165  iXLen);
1166
; Unmasked vector-scalar vadd.vx at e8/m1; undef passthru, destination reuses v8.
1167define <vscale x 8 x i8> @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1168; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8:
1169; CHECK:       # %bb.0: # %entry
1170; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
1171; CHECK-NEXT:    vadd.vx v8, v8, a0
1172; CHECK-NEXT:    ret
1173entry:
1174  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
1175    <vscale x 8 x i8> undef,
1176    <vscale x 8 x i8> %0,
1177    i8 %1,
1178    iXLen %2)
1179
1180  ret <vscale x 8 x i8> %a
1181}
1182
1183declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
1184  <vscale x 8 x i8>,
1185  <vscale x 8 x i8>,
1186  i8,
1187  <vscale x 8 x i1>,
1188  iXLen, iXLen);
1189
; Masked vadd.vx at e8/m1; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1190define <vscale x 8 x i8> @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1191; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8:
1192; CHECK:       # %bb.0: # %entry
1193; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
1194; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
1195; CHECK-NEXT:    ret
1196entry:
1197  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
1198    <vscale x 8 x i8> %0,
1199    <vscale x 8 x i8> %1,
1200    i8 %2,
1201    <vscale x 8 x i1> %3,
1202    iXLen %4, iXLen 1)
1203
1204  ret <vscale x 8 x i8> %a
1205}
1206
1207declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
1208  <vscale x 16 x i8>,
1209  <vscale x 16 x i8>,
1210  i8,
1211  iXLen);
1212
; Unmasked vector-scalar vadd.vx at e8/m2; undef passthru, destination reuses v8.
1213define <vscale x 16 x i8> @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1214; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8:
1215; CHECK:       # %bb.0: # %entry
1216; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
1217; CHECK-NEXT:    vadd.vx v8, v8, a0
1218; CHECK-NEXT:    ret
1219entry:
1220  %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
1221    <vscale x 16 x i8> undef,
1222    <vscale x 16 x i8> %0,
1223    i8 %1,
1224    iXLen %2)
1225
1226  ret <vscale x 16 x i8> %a
1227}
1228
1229declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
1230  <vscale x 16 x i8>,
1231  <vscale x 16 x i8>,
1232  i8,
1233  <vscale x 16 x i1>,
1234  iXLen, iXLen);
1235
; Masked vadd.vx at e8/m2; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1236define <vscale x 16 x i8> @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1237; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8:
1238; CHECK:       # %bb.0: # %entry
1239; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
1240; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
1241; CHECK-NEXT:    ret
1242entry:
1243  %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
1244    <vscale x 16 x i8> %0,
1245    <vscale x 16 x i8> %1,
1246    i8 %2,
1247    <vscale x 16 x i1> %3,
1248    iXLen %4, iXLen 1)
1249
1250  ret <vscale x 16 x i8> %a
1251}
1252
1253declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
1254  <vscale x 32 x i8>,
1255  <vscale x 32 x i8>,
1256  i8,
1257  iXLen);
1258
; Unmasked vector-scalar vadd.vx at e8/m4; undef passthru, destination reuses v8.
1259define <vscale x 32 x i8> @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1260; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8:
1261; CHECK:       # %bb.0: # %entry
1262; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
1263; CHECK-NEXT:    vadd.vx v8, v8, a0
1264; CHECK-NEXT:    ret
1265entry:
1266  %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
1267    <vscale x 32 x i8> undef,
1268    <vscale x 32 x i8> %0,
1269    i8 %1,
1270    iXLen %2)
1271
1272  ret <vscale x 32 x i8> %a
1273}
1274
1275declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
1276  <vscale x 32 x i8>,
1277  <vscale x 32 x i8>,
1278  i8,
1279  <vscale x 32 x i1>,
1280  iXLen, iXLen);
1281
; Masked vadd.vx at e8/m4; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1282define <vscale x 32 x i8> @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1283; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8:
1284; CHECK:       # %bb.0: # %entry
1285; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
1286; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
1287; CHECK-NEXT:    ret
1288entry:
1289  %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
1290    <vscale x 32 x i8> %0,
1291    <vscale x 32 x i8> %1,
1292    i8 %2,
1293    <vscale x 32 x i1> %3,
1294    iXLen %4, iXLen 1)
1295
1296  ret <vscale x 32 x i8> %a
1297}
1298
1299declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
1300  <vscale x 64 x i8>,
1301  <vscale x 64 x i8>,
1302  i8,
1303  iXLen);
1304
; Unmasked vector-scalar vadd.vx at e8/m8; undef passthru, destination reuses v8.
1305define <vscale x 64 x i8> @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
1306; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8:
1307; CHECK:       # %bb.0: # %entry
1308; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
1309; CHECK-NEXT:    vadd.vx v8, v8, a0
1310; CHECK-NEXT:    ret
1311entry:
1312  %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
1313    <vscale x 64 x i8> undef,
1314    <vscale x 64 x i8> %0,
1315    i8 %1,
1316    iXLen %2)
1317
1318  ret <vscale x 64 x i8> %a
1319}
1320
1321declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
1322  <vscale x 64 x i8>,
1323  <vscale x 64 x i8>,
1324  i8,
1325  <vscale x 64 x i1>,
1326  iXLen, iXLen);
1327
; Masked vadd.vx at e8/m8; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1328define <vscale x 64 x i8> @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
1329; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8:
1330; CHECK:       # %bb.0: # %entry
1331; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
1332; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
1333; CHECK-NEXT:    ret
1334entry:
1335  %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
1336    <vscale x 64 x i8> %0,
1337    <vscale x 64 x i8> %1,
1338    i8 %2,
1339    <vscale x 64 x i1> %3,
1340    iXLen %4, iXLen 1)
1341
1342  ret <vscale x 64 x i8> %a
1343}
1344
1345declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
1346  <vscale x 1 x i16>,
1347  <vscale x 1 x i16>,
1348  i16,
1349  iXLen);
1350
; Unmasked vector-scalar vadd.vx at e16/mf4; undef passthru, destination reuses v8.
1351define <vscale x 1 x i16> @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1352; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16:
1353; CHECK:       # %bb.0: # %entry
1354; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1355; CHECK-NEXT:    vadd.vx v8, v8, a0
1356; CHECK-NEXT:    ret
1357entry:
1358  %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
1359    <vscale x 1 x i16> undef,
1360    <vscale x 1 x i16> %0,
1361    i16 %1,
1362    iXLen %2)
1363
1364  ret <vscale x 1 x i16> %a
1365}
1366
1367declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
1368  <vscale x 1 x i16>,
1369  <vscale x 1 x i16>,
1370  i16,
1371  <vscale x 1 x i1>,
1372  iXLen, iXLen);
1373
; Masked vadd.vx at e16/mf4; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1374define <vscale x 1 x i16> @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1375; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16:
1376; CHECK:       # %bb.0: # %entry
1377; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1378; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
1379; CHECK-NEXT:    ret
1380entry:
1381  %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
1382    <vscale x 1 x i16> %0,
1383    <vscale x 1 x i16> %1,
1384    i16 %2,
1385    <vscale x 1 x i1> %3,
1386    iXLen %4, iXLen 1)
1387
1388  ret <vscale x 1 x i16> %a
1389}
1390
1391declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
1392  <vscale x 2 x i16>,
1393  <vscale x 2 x i16>,
1394  i16,
1395  iXLen);
1396
; Unmasked vector-scalar vadd.vx at e16/mf2; undef passthru, destination reuses v8.
1397define <vscale x 2 x i16> @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1398; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16:
1399; CHECK:       # %bb.0: # %entry
1400; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1401; CHECK-NEXT:    vadd.vx v8, v8, a0
1402; CHECK-NEXT:    ret
1403entry:
1404  %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
1405    <vscale x 2 x i16> undef,
1406    <vscale x 2 x i16> %0,
1407    i16 %1,
1408    iXLen %2)
1409
1410  ret <vscale x 2 x i16> %a
1411}
1412
1413declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
1414  <vscale x 2 x i16>,
1415  <vscale x 2 x i16>,
1416  i16,
1417  <vscale x 2 x i1>,
1418  iXLen, iXLen);
1419
; Masked vadd.vx at e16/mf2; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1420define <vscale x 2 x i16> @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1421; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16:
1422; CHECK:       # %bb.0: # %entry
1423; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1424; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
1425; CHECK-NEXT:    ret
1426entry:
1427  %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
1428    <vscale x 2 x i16> %0,
1429    <vscale x 2 x i16> %1,
1430    i16 %2,
1431    <vscale x 2 x i1> %3,
1432    iXLen %4, iXLen 1)
1433
1434  ret <vscale x 2 x i16> %a
1435}
1436
1437declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
1438  <vscale x 4 x i16>,
1439  <vscale x 4 x i16>,
1440  i16,
1441  iXLen);
1442
; Unmasked vector-scalar vadd.vx at e16/m1; undef passthru, destination reuses v8.
1443define <vscale x 4 x i16> @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1444; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16:
1445; CHECK:       # %bb.0: # %entry
1446; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1447; CHECK-NEXT:    vadd.vx v8, v8, a0
1448; CHECK-NEXT:    ret
1449entry:
1450  %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
1451    <vscale x 4 x i16> undef,
1452    <vscale x 4 x i16> %0,
1453    i16 %1,
1454    iXLen %2)
1455
1456  ret <vscale x 4 x i16> %a
1457}
1458
1459declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
1460  <vscale x 4 x i16>,
1461  <vscale x 4 x i16>,
1462  i16,
1463  <vscale x 4 x i1>,
1464  iXLen, iXLen);
1465
; Masked vadd.vx at e16/m1; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1466define <vscale x 4 x i16> @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1467; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16:
1468; CHECK:       # %bb.0: # %entry
1469; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1470; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
1471; CHECK-NEXT:    ret
1472entry:
1473  %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
1474    <vscale x 4 x i16> %0,
1475    <vscale x 4 x i16> %1,
1476    i16 %2,
1477    <vscale x 4 x i1> %3,
1478    iXLen %4, iXLen 1)
1479
1480  ret <vscale x 4 x i16> %a
1481}
1482
1483declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
1484  <vscale x 8 x i16>,
1485  <vscale x 8 x i16>,
1486  i16,
1487  iXLen);
1488
; Unmasked vector-scalar vadd.vx at e16/m2; undef passthru, destination reuses v8.
1489define <vscale x 8 x i16> @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1490; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16:
1491; CHECK:       # %bb.0: # %entry
1492; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1493; CHECK-NEXT:    vadd.vx v8, v8, a0
1494; CHECK-NEXT:    ret
1495entry:
1496  %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
1497    <vscale x 8 x i16> undef,
1498    <vscale x 8 x i16> %0,
1499    i16 %1,
1500    iXLen %2)
1501
1502  ret <vscale x 8 x i16> %a
1503}
1504
1505declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
1506  <vscale x 8 x i16>,
1507  <vscale x 8 x i16>,
1508  i16,
1509  <vscale x 8 x i1>,
1510  iXLen, iXLen);
1511
; Masked vadd.vx at e16/m2; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1512define <vscale x 8 x i16> @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1513; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16:
1514; CHECK:       # %bb.0: # %entry
1515; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1516; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
1517; CHECK-NEXT:    ret
1518entry:
1519  %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
1520    <vscale x 8 x i16> %0,
1521    <vscale x 8 x i16> %1,
1522    i16 %2,
1523    <vscale x 8 x i1> %3,
1524    iXLen %4, iXLen 1)
1525
1526  ret <vscale x 8 x i16> %a
1527}
1528
1529declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
1530  <vscale x 16 x i16>,
1531  <vscale x 16 x i16>,
1532  i16,
1533  iXLen);
1534
; Unmasked vector-scalar vadd.vx at e16/m4; undef passthru, destination reuses v8.
1535define <vscale x 16 x i16> @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1536; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16:
1537; CHECK:       # %bb.0: # %entry
1538; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1539; CHECK-NEXT:    vadd.vx v8, v8, a0
1540; CHECK-NEXT:    ret
1541entry:
1542  %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
1543    <vscale x 16 x i16> undef,
1544    <vscale x 16 x i16> %0,
1545    i16 %1,
1546    iXLen %2)
1547
1548  ret <vscale x 16 x i16> %a
1549}
1550
1551declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
1552  <vscale x 16 x i16>,
1553  <vscale x 16 x i16>,
1554  i16,
1555  <vscale x 16 x i1>,
1556  iXLen, iXLen);
1557
; Masked vadd.vx at e16/m4; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1558define <vscale x 16 x i16> @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1559; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16:
1560; CHECK:       # %bb.0: # %entry
1561; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1562; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
1563; CHECK-NEXT:    ret
1564entry:
1565  %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
1566    <vscale x 16 x i16> %0,
1567    <vscale x 16 x i16> %1,
1568    i16 %2,
1569    <vscale x 16 x i1> %3,
1570    iXLen %4, iXLen 1)
1571
1572  ret <vscale x 16 x i16> %a
1573}
1574
1575declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
1576  <vscale x 32 x i16>,
1577  <vscale x 32 x i16>,
1578  i16,
1579  iXLen);
1580
; Unmasked vector-scalar vadd.vx at e16/m8; undef passthru, destination reuses v8.
1581define <vscale x 32 x i16> @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
1582; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16:
1583; CHECK:       # %bb.0: # %entry
1584; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
1585; CHECK-NEXT:    vadd.vx v8, v8, a0
1586; CHECK-NEXT:    ret
1587entry:
1588  %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
1589    <vscale x 32 x i16> undef,
1590    <vscale x 32 x i16> %0,
1591    i16 %1,
1592    iXLen %2)
1593
1594  ret <vscale x 32 x i16> %a
1595}
1596
1597declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
1598  <vscale x 32 x i16>,
1599  <vscale x 32 x i16>,
1600  i16,
1601  <vscale x 32 x i1>,
1602  iXLen, iXLen);
1603
; Masked vadd.vx at e16/m8; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1604define <vscale x 32 x i16> @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1605; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16:
1606; CHECK:       # %bb.0: # %entry
1607; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
1608; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
1609; CHECK-NEXT:    ret
1610entry:
1611  %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
1612    <vscale x 32 x i16> %0,
1613    <vscale x 32 x i16> %1,
1614    i16 %2,
1615    <vscale x 32 x i1> %3,
1616    iXLen %4, iXLen 1)
1617
1618  ret <vscale x 32 x i16> %a
1619}
1620
1621declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
1622  <vscale x 1 x i32>,
1623  <vscale x 1 x i32>,
1624  i32,
1625  iXLen);
1626
; Unmasked vector-scalar vadd.vx at e32/mf2; undef passthru, destination reuses v8.
1627define <vscale x 1 x i32> @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1628; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32:
1629; CHECK:       # %bb.0: # %entry
1630; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1631; CHECK-NEXT:    vadd.vx v8, v8, a0
1632; CHECK-NEXT:    ret
1633entry:
1634  %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
1635    <vscale x 1 x i32> undef,
1636    <vscale x 1 x i32> %0,
1637    i32 %1,
1638    iXLen %2)
1639
1640  ret <vscale x 1 x i32> %a
1641}
1642
1643declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
1644  <vscale x 1 x i32>,
1645  <vscale x 1 x i32>,
1646  i32,
1647  <vscale x 1 x i1>,
1648  iXLen, iXLen);
1649
; Masked vadd.vx at e32/mf2; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1650define <vscale x 1 x i32> @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1651; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32:
1652; CHECK:       # %bb.0: # %entry
1653; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
1654; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
1655; CHECK-NEXT:    ret
1656entry:
1657  %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
1658    <vscale x 1 x i32> %0,
1659    <vscale x 1 x i32> %1,
1660    i32 %2,
1661    <vscale x 1 x i1> %3,
1662    iXLen %4, iXLen 1)
1663
1664  ret <vscale x 1 x i32> %a
1665}
1666
1667declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
1668  <vscale x 2 x i32>,
1669  <vscale x 2 x i32>,
1670  i32,
1671  iXLen);
1672
; Unmasked vector-scalar vadd.vx at e32/m1; undef passthru, destination reuses v8.
1673define <vscale x 2 x i32> @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1674; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32:
1675; CHECK:       # %bb.0: # %entry
1676; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1677; CHECK-NEXT:    vadd.vx v8, v8, a0
1678; CHECK-NEXT:    ret
1679entry:
1680  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
1681    <vscale x 2 x i32> undef,
1682    <vscale x 2 x i32> %0,
1683    i32 %1,
1684    iXLen %2)
1685
1686  ret <vscale x 2 x i32> %a
1687}
1688
1689declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
1690  <vscale x 2 x i32>,
1691  <vscale x 2 x i32>,
1692  i32,
1693  <vscale x 2 x i1>,
1694  iXLen, iXLen);
1695
; Masked vadd.vx at e32/m1; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1696define <vscale x 2 x i32> @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1697; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32:
1698; CHECK:       # %bb.0: # %entry
1699; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
1700; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
1701; CHECK-NEXT:    ret
1702entry:
1703  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
1704    <vscale x 2 x i32> %0,
1705    <vscale x 2 x i32> %1,
1706    i32 %2,
1707    <vscale x 2 x i1> %3,
1708    iXLen %4, iXLen 1)
1709
1710  ret <vscale x 2 x i32> %a
1711}
1712
1713declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
1714  <vscale x 4 x i32>,
1715  <vscale x 4 x i32>,
1716  i32,
1717  iXLen);
1718
; Unmasked vector-scalar vadd.vx at e32/m2; undef passthru, destination reuses v8.
1719define <vscale x 4 x i32> @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1720; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32:
1721; CHECK:       # %bb.0: # %entry
1722; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1723; CHECK-NEXT:    vadd.vx v8, v8, a0
1724; CHECK-NEXT:    ret
1725entry:
1726  %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
1727    <vscale x 4 x i32> undef,
1728    <vscale x 4 x i32> %0,
1729    i32 %1,
1730    iXLen %2)
1731
1732  ret <vscale x 4 x i32> %a
1733}
1734
1735declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
1736  <vscale x 4 x i32>,
1737  <vscale x 4 x i32>,
1738  i32,
1739  <vscale x 4 x i1>,
1740  iXLen, iXLen);
1741
; Masked vadd.vx at e32/m2; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1742define <vscale x 4 x i32> @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1743; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32:
1744; CHECK:       # %bb.0: # %entry
1745; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
1746; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
1747; CHECK-NEXT:    ret
1748entry:
1749  %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
1750    <vscale x 4 x i32> %0,
1751    <vscale x 4 x i32> %1,
1752    i32 %2,
1753    <vscale x 4 x i1> %3,
1754    iXLen %4, iXLen 1)
1755
1756  ret <vscale x 4 x i32> %a
1757}
1758
1759declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
1760  <vscale x 8 x i32>,
1761  <vscale x 8 x i32>,
1762  i32,
1763  iXLen);
1764
; Unmasked vector-scalar vadd.vx at e32/m4; undef passthru, destination reuses v8.
1765define <vscale x 8 x i32> @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1766; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32:
1767; CHECK:       # %bb.0: # %entry
1768; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
1769; CHECK-NEXT:    vadd.vx v8, v8, a0
1770; CHECK-NEXT:    ret
1771entry:
1772  %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
1773    <vscale x 8 x i32> undef,
1774    <vscale x 8 x i32> %0,
1775    i32 %1,
1776    iXLen %2)
1777
1778  ret <vscale x 8 x i32> %a
1779}
1780
1781declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
1782  <vscale x 8 x i32>,
1783  <vscale x 8 x i32>,
1784  i32,
1785  <vscale x 8 x i1>,
1786  iXLen, iXLen);
1787
; Masked vadd.vx at e32/m4; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1788define <vscale x 8 x i32> @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1789; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32:
1790; CHECK:       # %bb.0: # %entry
1791; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
1792; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
1793; CHECK-NEXT:    ret
1794entry:
1795  %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
1796    <vscale x 8 x i32> %0,
1797    <vscale x 8 x i32> %1,
1798    i32 %2,
1799    <vscale x 8 x i1> %3,
1800    iXLen %4, iXLen 1)
1801
1802  ret <vscale x 8 x i32> %a
1803}
1804
1805declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
1806  <vscale x 16 x i32>,
1807  <vscale x 16 x i32>,
1808  i32,
1809  iXLen);
1810
; Unmasked vector-scalar vadd.vx at e32/m8; undef passthru, destination reuses v8.
1811define <vscale x 16 x i32> @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
1812; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32:
1813; CHECK:       # %bb.0: # %entry
1814; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
1815; CHECK-NEXT:    vadd.vx v8, v8, a0
1816; CHECK-NEXT:    ret
1817entry:
1818  %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
1819    <vscale x 16 x i32> undef,
1820    <vscale x 16 x i32> %0,
1821    i32 %1,
1822    iXLen %2)
1823
1824  ret <vscale x 16 x i32> %a
1825}
1826
1827declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
1828  <vscale x 16 x i32>,
1829  <vscale x 16 x i32>,
1830  i32,
1831  <vscale x 16 x i1>,
1832  iXLen, iXLen);
1833
; Masked vadd.vx at e32/m8; policy "iXLen 1" = tail-agnostic, mask-undisturbed ("ta, mu").
1834define <vscale x 16 x i32> @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1835; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32:
1836; CHECK:       # %bb.0: # %entry
1837; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
1838; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
1839; CHECK-NEXT:    ret
1840entry:
1841  %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
1842    <vscale x 16 x i32> %0,
1843    <vscale x 16 x i32> %1,
1844    i32 %2,
1845    <vscale x 16 x i1> %3,
1846    iXLen %4, iXLen 1)
1847
1848  ret <vscale x 16 x i32> %a
1849}
1850
1851declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
1852  <vscale x 1 x i64>,
1853  <vscale x 1 x i64>,
1854  i64,
1855  iXLen);
1856
; Unmasked vadd.vx with an i64 scalar: on RV32 the scalar is spilled to the
; stack and splatted with vlse64.v (i64 does not fit one GPR), falling back to
; vadd.vv; on RV64 it stays in a0 and vadd.vx is used directly.
define <vscale x 1 x i64> @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vadd.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vadd.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}
1884
; i64 scalar produced by sign-extending an i32: RV32 can use vadd.vx on the
; i32 value directly (no stack splat); RV64 needs an explicit sext.w first.
define <vscale x 1 x i64> @intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV32-NEXT:    vadd.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sext.w a0, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vadd.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %ext = sext i32 %1 to i64
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %ext,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}
1908
; i64 scalar from a sign-extending i32 load: both targets fold the extension
; into lw and use vadd.vx (shared CHECK prefix, no stack splat on RV32).
define <vscale x 1 x i64> @intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lw a0, 0(a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %load = load i32, ptr %1
  %ext = sext i32 %load to i64
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %ext,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}
1927
1928declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
1929  <vscale x 1 x i64>,
1930  <vscale x 1 x i64>,
1931  i64,
1932  <vscale x 1 x i1>,
1933  iXLen, iXLen);
1934
; Masked i64 vadd.vx, e64/m1: RV32 splats the scalar through the stack with
; vlse64.v and uses masked vadd.vv; RV64 uses masked vadd.vx directly.
define <vscale x 1 x i64> @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vadd.vv v8, v9, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vadd.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}
1963
1964declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
1965  <vscale x 2 x i64>,
1966  <vscale x 2 x i64>,
1967  i64,
1968  iXLen);
1969
; Unmasked i64 vadd.vx at e64/m2: RV32 splats the i64 scalar via stack +
; vlse64.v and uses vadd.vv; RV64 uses vadd.vx.
define <vscale x 2 x i64> @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vadd.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vadd.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}
1997
1998declare <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
1999  <vscale x 2 x i64>,
2000  <vscale x 2 x i64>,
2001  i64,
2002  <vscale x 2 x i1>,
2003  iXLen, iXLen);
2004
; Masked i64 vadd.vx at e64/m2: RV32 falls back to masked vadd.vv after a
; stack splat; RV64 uses masked vadd.vx.
define <vscale x 2 x i64> @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vadd.vv v8, v10, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vadd.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}
2033
2034declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
2035  <vscale x 4 x i64>,
2036  <vscale x 4 x i64>,
2037  i64,
2038  iXLen);
2039
; Unmasked i64 vadd.vx at e64/m4: RV32 splats the i64 scalar via stack +
; vlse64.v and uses vadd.vv; RV64 uses vadd.vx.
define <vscale x 4 x i64> @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vadd.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vadd.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}
2067
2068declare <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
2069  <vscale x 4 x i64>,
2070  <vscale x 4 x i64>,
2071  i64,
2072  <vscale x 4 x i1>,
2073  iXLen, iXLen);
2074
; Masked i64 vadd.vx at e64/m4: RV32 falls back to masked vadd.vv after a
; stack splat; RV64 uses masked vadd.vx.
define <vscale x 4 x i64> @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vadd.vv v8, v12, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vadd.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}
2103
2104declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
2105  <vscale x 8 x i64>,
2106  <vscale x 8 x i64>,
2107  i64,
2108  iXLen);
2109
; Unmasked i64 vadd.vx at e64/m8: RV32 splats the i64 scalar via stack +
; vlse64.v and uses vadd.vv; RV64 uses vadd.vx.
define <vscale x 8 x i64> @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vadd_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vadd.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vadd_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vadd.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}
2137
2138declare <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
2139  <vscale x 8 x i64>,
2140  <vscale x 8 x i64>,
2141  i64,
2142  <vscale x 8 x i1>,
2143  iXLen, iXLen);
2144
; Masked i64 vadd.vx at e64/m8: RV32 falls back to masked vadd.vv after a
; stack splat; RV64 uses masked vadd.vx.
define <vscale x 8 x i64> @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT:    vlse64.v v24, (a0), zero
; RV32-NEXT:    vadd.vv v8, v16, v24, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vadd.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
2173
; Immediate 9 fits the 5-bit simm field, so unmasked vadd.vi is selected.
define <vscale x 1 x i8> @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}
2189
; Masked form with immediate -9: expects v0.t-masked vadd.vi under ta,mu.
define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}
2206
; Unmasked vadd.vi with immediate 9 at e8/mf4.
define <vscale x 2 x i8> @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}
2222
; Masked vadd.vi with immediate -9 at e8/mf4.
define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}
2239
; Unmasked vadd.vi with immediate 9 at e8/mf2.
define <vscale x 4 x i8> @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}
2255
; Masked vadd.vi with immediate -9 at e8/mf2.
define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}
2272
; Unmasked vadd.vi with immediate 9 at e8/m1.
define <vscale x 8 x i8> @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}
2288
; Masked vadd.vi with immediate -9 at e8/m1.
define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}
2305
; Unmasked vadd.vi with immediate 9 at e8/m2.
define <vscale x 16 x i8> @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}
2321
; Masked vadd.vi with immediate -9 at e8/m2.
define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}
2338
; Unmasked vadd.vi with immediate 9 at e8/m4.
define <vscale x 32 x i8> @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}
2354
; Masked vadd.vi with immediate -9 at e8/m4.
define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 -9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}
2371
; Unmasked vadd.vi at e8/m8; this one uses the negative immediate -9
; (unlike the other unmasked vi tests, which use 9).
define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 -9,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}
2387
; Masked vadd.vi with immediate -9 at e8/m8.
define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 -9,
    <vscale x 64 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 64 x i8> %a
}
2404
; Unmasked vadd.vi with immediate 9 at e16/mf4.
define <vscale x 1 x i16> @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}
2420
; Masked vadd.vi with immediate -9 at e16/mf4.
define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}
2437
; Unmasked vadd.vi with immediate 9 at e16/mf2.
define <vscale x 2 x i16> @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}
2453
; Masked vadd.vi with immediate -9 at e16/mf2.
define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}
2470
; Unmasked vadd.vi with immediate 9 at e16/m1.
define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}
2486
; Masked vadd.vi with immediate -9 at e16/m1.
define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}
2503
; Unmasked vadd.vi with immediate 9 at e16/m2.
define <vscale x 8 x i16> @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}
2519
; Masked vadd.vi with immediate -9 at e16/m2.
define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}
2536
; Unmasked vadd.vi with immediate 9 at e16/m4.
define <vscale x 16 x i16> @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}
2552
; Masked vadd.vi with immediate -9 at e16/m4.
define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}
2569
; Unmasked vadd.vi with immediate 9 at e16/m8.
define <vscale x 32 x i16> @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}
2585
; Masked vadd.vi with immediate -9 at e16/m8.
define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 -9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}
2602
; Unmasked vadd.vi with immediate 9 at e32/mf2.
define <vscale x 1 x i32> @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}
2618
; Masked vadd.vi with immediate -9 at e32/mf2.
define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}
2635
; Unmasked vadd.vi with immediate 9 at e32/m1.
define <vscale x 2 x i32> @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}
2651
; Masked vadd.vi with immediate -9 at e32/m1.
define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}
2668
; Unmasked vadd.vi with immediate 9 at e32/m2.
define <vscale x 4 x i32> @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}
2684
; Masked vadd.vi with immediate -9 at e32/m2.
define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}
2701
; Unmasked vadd.vi with immediate 9 at e32/m4.
define <vscale x 8 x i32> @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}
2717
; Masked vadd.vi with immediate -9 at e32/m4.
define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}
2734
; Unmasked vadd.vi with immediate 9 at e32/m8.
define <vscale x 16 x i32> @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}
2750
; Masked vadd.vi with immediate -9 at e32/m8.
define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}
2767
; Unmasked vadd.vi, immediate 9, SEW=64 LMUL=1 (nxv1i64). The passthru
; operand is undef, so the expected code operates in place (v8, v8).
define <vscale x 1 x i64> @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}
2783
; Masked vadd.vi, SEW=64 LMUL=1. The immediate is -9 so the masked
; variants also cover a negative simm5 for 64-bit elements, matching the
; masked vi tests for the 32-bit element types; the unmasked test above
; already covers the positive immediate 9.
define <vscale x 1 x i64> @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}
2800
; Unmasked vadd.vi, immediate 9, SEW=64 LMUL=2 (nxv2i64). The passthru
; operand is undef, so the expected code operates in place (v8, v8).
define <vscale x 2 x i64> @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}
2816
; Masked vadd.vi, SEW=64 LMUL=2. The immediate is -9 so the masked
; variants also cover a negative simm5 for 64-bit elements, matching the
; masked vi tests for the 32-bit element types; the unmasked test above
; already covers the positive immediate 9.
define <vscale x 2 x i64> @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}
2833
; Unmasked vadd.vi, immediate 9, SEW=64 LMUL=4 (nxv4i64). The passthru
; operand is undef, so the expected code operates in place (v8, v8).
define <vscale x 4 x i64> @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}
2849
; Masked vadd.vi, SEW=64 LMUL=4. The immediate is -9 so the masked
; variants also cover a negative simm5 for 64-bit elements, matching the
; masked vi tests for the 32-bit element types; the unmasked test above
; already covers the positive immediate 9.
define <vscale x 4 x i64> @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}
2866
; Unmasked vadd.vi, immediate 9, SEW=64 LMUL=8 (nxv8i64). The passthru
; operand is undef, so the expected code operates in place (v8, v8).
define <vscale x 8 x i64> @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}
2882
; Masked vadd.vi, SEW=64 LMUL=8. The immediate is -9 so the masked
; variants also cover a negative simm5 for 64-bit elements, matching the
; masked vi tests for the 32-bit element types; the unmasked test above
; already covers the positive immediate 9.
define <vscale x 8 x i64> @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}
2899