; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

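; These tests cover the unmasked and masked forms of the llvm.riscv.vredand
; reduction intrinsic across i8/i16/i32/i64 element types and register group
; sizes; each case is expected to lower to a single vredand.vs under a
; tail-undisturbed (tu) vsetvli.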
declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
    <vscale x 8 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
    <vscale x 8 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
  <vscale x 8 x i8>,
  <vscale x 2 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
    <vscale x 8 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
  <vscale x 8 x i8>,
  <vscale x 2 x i8>,
  <vscale x 8 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
    <vscale x 8 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
  <vscale x 8 x i8>,
  <vscale x 4 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
    <vscale x 8 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
  <vscale x 8 x i8>,
  <vscale x 4 x i8>,
  <vscale x 8 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
    <vscale x 8 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
  <vscale x 8 x i8>,
  <vscale x 16 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
    <vscale x 8 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
  <vscale x 8 x i8>,
  <vscale x 16 x i8>,
  <vscale x 8 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
    <vscale x 8 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
  <vscale x 8 x i8>,
  <vscale x 32 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
    <vscale x 8 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
  <vscale x 8 x i8>,
  <vscale x 32 x i8>,
  <vscale x 8 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
    <vscale x 8 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
  <vscale x 4 x i16>,
  <vscale x 1 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
  <vscale x 4 x i16>,
  <vscale x 1 x i16>,
  <vscale x 4 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
  <vscale x 4 x i16>,
  <vscale x 2 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
    <vscale x 4 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
  <vscale x 4 x i16>,
  <vscale x 2 x i16>,
  <vscale x 4 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
    <vscale x 4 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
  <vscale x 4 x i16>,
  <vscale x 8 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
    <vscale x 4 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
  <vscale x 4 x i16>,
  <vscale x 8 x i16>,
  <vscale x 4 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
    <vscale x 4 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
  <vscale x 4 x i16>,
  <vscale x 16 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
    <vscale x 4 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
  <vscale x 4 x i16>,
  <vscale x 16 x i16>,
  <vscale x 4 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
    <vscale x 4 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
  <vscale x 4 x i16>,
  <vscale x 32 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
    <vscale x 4 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
  <vscale x 4 x i16>,
  <vscale x 32 x i16>,
  <vscale x 4 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
    <vscale x 4 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
  <vscale x 2 x i32>,
  <vscale x 1 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
    <vscale x 2 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
  <vscale x 2 x i32>,
  <vscale x 1 x i32>,
  <vscale x 2 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
    <vscale x 2 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
  <vscale x 2 x i32>,
  <vscale x 4 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
    <vscale x 2 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
  <vscale x 2 x i32>,
  <vscale x 4 x i32>,
  <vscale x 2 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
    <vscale x 2 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
  <vscale x 2 x i32>,
  <vscale x 8 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
  <vscale x 2 x i32>,
  <vscale x 8 x i32>,
  <vscale x 2 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
  <vscale x 2 x i32>,
  <vscale x 16 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
    <vscale x 2 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
  <vscale x 2 x i32>,
  <vscale x 16 x i32>,
  <vscale x 2 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
    <vscale x 2 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
  <vscale x 1 x i64>,
  <vscale x 2 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
    <vscale x 1 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
  <vscale x 1 x i64>,
  <vscale x 2 x i64>,
  <vscale x 1 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
    <vscale x 1 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
  <vscale x 1 x i64>,
  <vscale x 4 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
    <vscale x 1 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
  <vscale x 1 x i64>,
  <vscale x 4 x i64>,
  <vscale x 1 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
    <vscale x 1 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
  <vscale x 1 x i64>,
  <vscale x 8 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
    <vscale x 1 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
  <vscale x 1 x i64>,
  <vscale x 8 x i64>,
  <vscale x 1 x i64>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
    <vscale x 1 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}