; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
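; Tests codegen for the llvm.riscv.vredand intrinsics (vector single-width
; integer reduction AND), in unmasked and masked forms, across SEW/LMUL
; combinations. vredand.vs vd, vs2, vs1 writes the AND-reduction of vs1[0]
; and vs2[0..vl-1] to vd[0]; the tail-undisturbed (tu) vsetvli policy
; preserves the remaining destination elements.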

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
    <vscale x 8 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
    <vscale x 8 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
  <vscale x 8 x i8>,
  <vscale x 2 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
    <vscale x 8 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
  <vscale x 8 x i8>,
  <vscale x 2 x i8>,
  <vscale x 8 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
    <vscale x 8 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
  <vscale x 8 x i8>,
  <vscale x 4 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
    <vscale x 8 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
  <vscale x 8 x i8>,
  <vscale x 4 x i8>,
  <vscale x 8 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
    <vscale x 8 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

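; When the reduction source vector is wider than LMUL=1 it occupies a
; register group (v10-v11 at m2, v12-v15 at m4, v16-v23 at m8), so the
; scalar operand is allocated to v9 and the register numbers of the two
; vredand.vs source operands swap relative to the LMUL<=1 cases above.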
declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
  <vscale x 8 x i8>,
  <vscale x 16 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
    <vscale x 8 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
  <vscale x 8 x i8>,
  <vscale x 16 x i8>,
  <vscale x 8 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
    <vscale x 8 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
  <vscale x 8 x i8>,
  <vscale x 32 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
    <vscale x 8 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
  <vscale x 8 x i8>,
  <vscale x 32 x i8>,
  <vscale x 8 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
    <vscale x 8 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
  <vscale x 4 x i16>,
  <vscale x 1 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
  <vscale x 4 x i16>,
  <vscale x 1 x i16>,
  <vscale x 4 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
  <vscale x 4 x i16>,
  <vscale x 2 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
    <vscale x 4 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
  <vscale x 4 x i16>,
  <vscale x 2 x i16>,
  <vscale x 4 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
    <vscale x 4 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
  <vscale x 4 x i16>,
  <vscale x 8 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
    <vscale x 4 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
  <vscale x 4 x i16>,
  <vscale x 8 x i16>,
  <vscale x 4 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
    <vscale x 4 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
  <vscale x 4 x i16>,
  <vscale x 16 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
    <vscale x 4 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
  <vscale x 4 x i16>,
  <vscale x 16 x i16>,
  <vscale x 4 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
    <vscale x 4 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
  <vscale x 4 x i16>,
  <vscale x 32 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
    <vscale x 4 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
  <vscale x 4 x i16>,
  <vscale x 32 x i16>,
  <vscale x 4 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
    <vscale x 4 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
  <vscale x 2 x i32>,
  <vscale x 1 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
    <vscale x 2 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
  <vscale x 2 x i32>,
  <vscale x 1 x i32>,
  <vscale x 2 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
    <vscale x 2 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
  <vscale x 2 x i32>,
  <vscale x 4 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
    <vscale x 2 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
  <vscale x 2 x i32>,
  <vscale x 4 x i32>,
  <vscale x 2 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
    <vscale x 2 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
  <vscale x 2 x i32>,
  <vscale x 8 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
  <vscale x 2 x i32>,
  <vscale x 8 x i32>,
  <vscale x 2 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
  <vscale x 2 x i32>,
  <vscale x 16 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
    <vscale x 2 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
  <vscale x 2 x i32>,
  <vscale x 16 x i32>,
  <vscale x 2 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
    <vscale x 2 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
  <vscale x 1 x i64>,
  <vscale x 2 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
    <vscale x 1 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
  <vscale x 1 x i64>,
  <vscale x 2 x i64>,
  <vscale x 1 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
    <vscale x 1 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
  <vscale x 1 x i64>,
  <vscale x 4 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
    <vscale x 1 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
  <vscale x 1 x i64>,
  <vscale x 4 x i64>,
  <vscale x 1 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
    <vscale x 1 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
  <vscale x 1 x i64>,
  <vscale x 8 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
    <vscale x 1 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
  <vscale x 1 x i64>,
  <vscale x 8 x i64>,
  <vscale x 1 x i64>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
    <vscale x 1 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}