xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll (revision b6c0f1bfa79a3a32d841ac5ab1f94c3aee3b5d90)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
3; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
4
; [review] VP mask reductions over <1 x i1>. CHECK lines are autogenerated
; (see NOTE at top of file) — regenerate with update_llc_test_checks.py
; instead of hand-editing. Lowering visible in the checks:
;   and: vmnot.m + masked vcpop == 0 (seqz), folded into %s with 'and'
;   or : masked vcpop != 0 (snez), folded into %s with 'or'
;   xor: masked vcpop parity (andi 1), folded into %s with 'xor'
5declare i1 @llvm.vp.reduce.and.v1i1(i1, <1 x i1>, <1 x i1>, i32)
6
7define zeroext i1 @vpreduce_and_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
8; CHECK-LABEL: vpreduce_and_v1i1:
9; CHECK:       # %bb.0:
10; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
11; CHECK-NEXT:    vmnot.m v9, v0
12; CHECK-NEXT:    vmv1r.v v0, v8
13; CHECK-NEXT:    vcpop.m a1, v9, v0.t
14; CHECK-NEXT:    seqz a1, a1
15; CHECK-NEXT:    and a0, a1, a0
16; CHECK-NEXT:    ret
17  %r = call i1 @llvm.vp.reduce.and.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
18  ret i1 %r
19}
20
21declare i1 @llvm.vp.reduce.or.v1i1(i1, <1 x i1>, <1 x i1>, i32)
22
23define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
24; CHECK-LABEL: vpreduce_or_v1i1:
25; CHECK:       # %bb.0:
26; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
27; CHECK-NEXT:    vmv1r.v v9, v0
28; CHECK-NEXT:    vmv1r.v v0, v8
29; CHECK-NEXT:    vcpop.m a1, v9, v0.t
30; CHECK-NEXT:    snez a1, a1
31; CHECK-NEXT:    or a0, a1, a0
32; CHECK-NEXT:    ret
33  %r = call i1 @llvm.vp.reduce.or.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
34  ret i1 %r
35}
36
37declare i1 @llvm.vp.reduce.xor.v1i1(i1, <1 x i1>, <1 x i1>, i32)
38
39define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
40; CHECK-LABEL: vpreduce_xor_v1i1:
41; CHECK:       # %bb.0:
42; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
43; CHECK-NEXT:    vmv1r.v v9, v0
44; CHECK-NEXT:    vmv1r.v v0, v8
45; CHECK-NEXT:    vcpop.m a1, v9, v0.t
46; CHECK-NEXT:    andi a1, a1, 1
47; CHECK-NEXT:    xor a0, a1, a0
48; CHECK-NEXT:    ret
49  %r = call i1 @llvm.vp.reduce.xor.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
50  ret i1 %r
51}
52
; [review] Same and/or/xor coverage as the v1i1 tests, for <2 x i1>.
; Still uses LMUL=mf8 (see the vsetvli in each check block). CHECK lines
; are autogenerated — do not edit by hand.
53declare i1 @llvm.vp.reduce.and.v2i1(i1, <2 x i1>, <2 x i1>, i32)
54
55define zeroext i1 @vpreduce_and_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
56; CHECK-LABEL: vpreduce_and_v2i1:
57; CHECK:       # %bb.0:
58; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
59; CHECK-NEXT:    vmnot.m v9, v0
60; CHECK-NEXT:    vmv1r.v v0, v8
61; CHECK-NEXT:    vcpop.m a1, v9, v0.t
62; CHECK-NEXT:    seqz a1, a1
63; CHECK-NEXT:    and a0, a1, a0
64; CHECK-NEXT:    ret
65  %r = call i1 @llvm.vp.reduce.and.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
66  ret i1 %r
67}
68
69declare i1 @llvm.vp.reduce.or.v2i1(i1, <2 x i1>, <2 x i1>, i32)
70
71define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
72; CHECK-LABEL: vpreduce_or_v2i1:
73; CHECK:       # %bb.0:
74; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
75; CHECK-NEXT:    vmv1r.v v9, v0
76; CHECK-NEXT:    vmv1r.v v0, v8
77; CHECK-NEXT:    vcpop.m a1, v9, v0.t
78; CHECK-NEXT:    snez a1, a1
79; CHECK-NEXT:    or a0, a1, a0
80; CHECK-NEXT:    ret
81  %r = call i1 @llvm.vp.reduce.or.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
82  ret i1 %r
83}
84
85declare i1 @llvm.vp.reduce.xor.v2i1(i1, <2 x i1>, <2 x i1>, i32)
86
87define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
88; CHECK-LABEL: vpreduce_xor_v2i1:
89; CHECK:       # %bb.0:
90; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
91; CHECK-NEXT:    vmv1r.v v9, v0
92; CHECK-NEXT:    vmv1r.v v0, v8
93; CHECK-NEXT:    vcpop.m a1, v9, v0.t
94; CHECK-NEXT:    andi a1, a1, 1
95; CHECK-NEXT:    xor a0, a1, a0
96; CHECK-NEXT:    ret
97  %r = call i1 @llvm.vp.reduce.xor.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
98  ret i1 %r
99}
100
; [review] and/or/xor VP reductions over <4 x i1>; identical lowering
; pattern to the smaller widths but with LMUL=mf4. CHECK lines are
; autogenerated — regenerate rather than hand-edit.
101declare i1 @llvm.vp.reduce.and.v4i1(i1, <4 x i1>, <4 x i1>, i32)
102
103define zeroext i1 @vpreduce_and_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
104; CHECK-LABEL: vpreduce_and_v4i1:
105; CHECK:       # %bb.0:
106; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
107; CHECK-NEXT:    vmnot.m v9, v0
108; CHECK-NEXT:    vmv1r.v v0, v8
109; CHECK-NEXT:    vcpop.m a1, v9, v0.t
110; CHECK-NEXT:    seqz a1, a1
111; CHECK-NEXT:    and a0, a1, a0
112; CHECK-NEXT:    ret
113  %r = call i1 @llvm.vp.reduce.and.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
114  ret i1 %r
115}
116
117declare i1 @llvm.vp.reduce.or.v4i1(i1, <4 x i1>, <4 x i1>, i32)
118
119define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
120; CHECK-LABEL: vpreduce_or_v4i1:
121; CHECK:       # %bb.0:
122; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
123; CHECK-NEXT:    vmv1r.v v9, v0
124; CHECK-NEXT:    vmv1r.v v0, v8
125; CHECK-NEXT:    vcpop.m a1, v9, v0.t
126; CHECK-NEXT:    snez a1, a1
127; CHECK-NEXT:    or a0, a1, a0
128; CHECK-NEXT:    ret
129  %r = call i1 @llvm.vp.reduce.or.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
130  ret i1 %r
131}
132
133declare i1 @llvm.vp.reduce.xor.v4i1(i1, <4 x i1>, <4 x i1>, i32)
134
135define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
136; CHECK-LABEL: vpreduce_xor_v4i1:
137; CHECK:       # %bb.0:
138; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
139; CHECK-NEXT:    vmv1r.v v9, v0
140; CHECK-NEXT:    vmv1r.v v0, v8
141; CHECK-NEXT:    vcpop.m a1, v9, v0.t
142; CHECK-NEXT:    andi a1, a1, 1
143; CHECK-NEXT:    xor a0, a1, a0
144; CHECK-NEXT:    ret
145  %r = call i1 @llvm.vp.reduce.xor.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
146  ret i1 %r
147}
148
; [review] and/or/xor VP reductions over <8 x i1>; same lowering pattern,
; LMUL=mf2. CHECK lines are autogenerated — do not edit by hand.
149declare i1 @llvm.vp.reduce.and.v8i1(i1, <8 x i1>, <8 x i1>, i32)
150
151define zeroext i1 @vpreduce_and_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
152; CHECK-LABEL: vpreduce_and_v8i1:
153; CHECK:       # %bb.0:
154; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
155; CHECK-NEXT:    vmnot.m v9, v0
156; CHECK-NEXT:    vmv1r.v v0, v8
157; CHECK-NEXT:    vcpop.m a1, v9, v0.t
158; CHECK-NEXT:    seqz a1, a1
159; CHECK-NEXT:    and a0, a1, a0
160; CHECK-NEXT:    ret
161  %r = call i1 @llvm.vp.reduce.and.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
162  ret i1 %r
163}
164
165declare i1 @llvm.vp.reduce.or.v8i1(i1, <8 x i1>, <8 x i1>, i32)
166
167define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
168; CHECK-LABEL: vpreduce_or_v8i1:
169; CHECK:       # %bb.0:
170; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
171; CHECK-NEXT:    vmv1r.v v9, v0
172; CHECK-NEXT:    vmv1r.v v0, v8
173; CHECK-NEXT:    vcpop.m a1, v9, v0.t
174; CHECK-NEXT:    snez a1, a1
175; CHECK-NEXT:    or a0, a1, a0
176; CHECK-NEXT:    ret
177  %r = call i1 @llvm.vp.reduce.or.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
178  ret i1 %r
179}
180
181declare i1 @llvm.vp.reduce.xor.v8i1(i1, <8 x i1>, <8 x i1>, i32)
182
183define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
184; CHECK-LABEL: vpreduce_xor_v8i1:
185; CHECK:       # %bb.0:
186; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
187; CHECK-NEXT:    vmv1r.v v9, v0
188; CHECK-NEXT:    vmv1r.v v0, v8
189; CHECK-NEXT:    vcpop.m a1, v9, v0.t
190; CHECK-NEXT:    andi a1, a1, 1
191; CHECK-NEXT:    xor a0, a1, a0
192; CHECK-NEXT:    ret
193  %r = call i1 @llvm.vp.reduce.xor.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
194  ret i1 %r
195}
196
; [review] and-reduction for a non-power-of-two width (<10 x i1>) and for
; <16 x i1>; both select LMUL=m1 per the vsetvli in the checks. CHECK lines
; are autogenerated — do not edit by hand.
197declare i1 @llvm.vp.reduce.and.v10i1(i1, <10 x i1>, <10 x i1>, i32)
198
199define zeroext i1 @vpreduce_and_v10i1(i1 zeroext %s, <10 x i1> %v, <10 x i1> %m, i32 zeroext %evl) {
200; CHECK-LABEL: vpreduce_and_v10i1:
201; CHECK:       # %bb.0:
202; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
203; CHECK-NEXT:    vmnot.m v9, v0
204; CHECK-NEXT:    vmv1r.v v0, v8
205; CHECK-NEXT:    vcpop.m a1, v9, v0.t
206; CHECK-NEXT:    seqz a1, a1
207; CHECK-NEXT:    and a0, a1, a0
208; CHECK-NEXT:    ret
209  %r = call i1 @llvm.vp.reduce.and.v10i1(i1 %s, <10 x i1> %v, <10 x i1> %m, i32 %evl)
210  ret i1 %r
211}
212
213declare i1 @llvm.vp.reduce.and.v16i1(i1, <16 x i1>, <16 x i1>, i32)
214
215define zeroext i1 @vpreduce_and_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
216; CHECK-LABEL: vpreduce_and_v16i1:
217; CHECK:       # %bb.0:
218; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
219; CHECK-NEXT:    vmnot.m v9, v0
220; CHECK-NEXT:    vmv1r.v v0, v8
221; CHECK-NEXT:    vcpop.m a1, v9, v0.t
222; CHECK-NEXT:    seqz a1, a1
223; CHECK-NEXT:    and a0, a1, a0
224; CHECK-NEXT:    ret
225  %r = call i1 @llvm.vp.reduce.and.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
226  ret i1 %r
227}
228
; [review] and-reduction over <256 x i1>: wider than one m8 group, so the
; checks show the operation split into two 128-element halves. The first
; half runs with EVL clamped to min(evl, 128) (li/bltu/mv sequence); the
; second half uses the branch-free remainder computation
; (addi -128 / sltu / addi -1 / and, yielding max(evl-128, 0)). Each half
; is reduced with vmnot.m + masked vcpop + seqz and folded into a0.
; CHECK lines are autogenerated — do not edit by hand.
229declare i1 @llvm.vp.reduce.and.v256i1(i1, <256 x i1>, <256 x i1>, i32)
230
231define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1> %m, i32 zeroext %evl) {
232; CHECK-LABEL: vpreduce_and_v256i1:
233; CHECK:       # %bb.0:
234; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
235; CHECK-NEXT:    vmv1r.v v11, v9
236; CHECK-NEXT:    vmv1r.v v9, v0
237; CHECK-NEXT:    li a3, 128
238; CHECK-NEXT:    mv a2, a1
239; CHECK-NEXT:    bltu a1, a3, .LBB14_2
240; CHECK-NEXT:  # %bb.1:
241; CHECK-NEXT:    li a2, 128
242; CHECK-NEXT:  .LBB14_2:
243; CHECK-NEXT:    vmv1r.v v0, v11
244; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
245; CHECK-NEXT:    vmnot.m v9, v9
246; CHECK-NEXT:    vcpop.m a2, v9, v0.t
247; CHECK-NEXT:    seqz a2, a2
248; CHECK-NEXT:    and a0, a2, a0
249; CHECK-NEXT:    addi a2, a1, -128
250; CHECK-NEXT:    sltu a1, a1, a2
251; CHECK-NEXT:    addi a1, a1, -1
252; CHECK-NEXT:    and a1, a1, a2
253; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
254; CHECK-NEXT:    vmnot.m v8, v8
255; CHECK-NEXT:    vmv1r.v v0, v10
256; CHECK-NEXT:    vcpop.m a1, v8, v0.t
257; CHECK-NEXT:    seqz a1, a1
258; CHECK-NEXT:    and a0, a1, a0
259; CHECK-NEXT:    ret
260  %r = call i1 @llvm.vp.reduce.and.v256i1(i1 %s, <256 x i1> %v, <256 x i1> %m, i32 %evl)
261  ret i1 %r
262}
263
; [review] or/xor VP reductions over <16 x i1> (LMUL=m1), completing the
; v16i1 coverage started above. CHECK lines are autogenerated.
264declare i1 @llvm.vp.reduce.or.v16i1(i1, <16 x i1>, <16 x i1>, i32)
265
266define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
267; CHECK-LABEL: vpreduce_or_v16i1:
268; CHECK:       # %bb.0:
269; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
270; CHECK-NEXT:    vmv1r.v v9, v0
271; CHECK-NEXT:    vmv1r.v v0, v8
272; CHECK-NEXT:    vcpop.m a1, v9, v0.t
273; CHECK-NEXT:    snez a1, a1
274; CHECK-NEXT:    or a0, a1, a0
275; CHECK-NEXT:    ret
276  %r = call i1 @llvm.vp.reduce.or.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
277  ret i1 %r
278}
279
280declare i1 @llvm.vp.reduce.xor.v16i1(i1, <16 x i1>, <16 x i1>, i32)
281
282define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
283; CHECK-LABEL: vpreduce_xor_v16i1:
284; CHECK:       # %bb.0:
285; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
286; CHECK-NEXT:    vmv1r.v v9, v0
287; CHECK-NEXT:    vmv1r.v v0, v8
288; CHECK-NEXT:    vcpop.m a1, v9, v0.t
289; CHECK-NEXT:    andi a1, a1, 1
290; CHECK-NEXT:    xor a0, a1, a0
291; CHECK-NEXT:    ret
292  %r = call i1 @llvm.vp.reduce.xor.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
293  ret i1 %r
294}
295
; [review] add VP reductions over i1 vectors: the generated code is
; identical to the xor tests above (vcpop + andi 1 + xor), since i1
; addition is exclusive-or. CHECK lines are autogenerated.
296declare i1 @llvm.vp.reduce.add.v1i1(i1, <1 x i1>, <1 x i1>, i32)
297
298define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
299; CHECK-LABEL: vpreduce_add_v1i1:
300; CHECK:       # %bb.0:
301; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
302; CHECK-NEXT:    vmv1r.v v9, v0
303; CHECK-NEXT:    vmv1r.v v0, v8
304; CHECK-NEXT:    vcpop.m a1, v9, v0.t
305; CHECK-NEXT:    andi a1, a1, 1
306; CHECK-NEXT:    xor a0, a1, a0
307; CHECK-NEXT:    ret
308  %r = call i1 @llvm.vp.reduce.add.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
309  ret i1 %r
310}
311
312declare i1 @llvm.vp.reduce.add.v2i1(i1, <2 x i1>, <2 x i1>, i32)
313
314define zeroext i1 @vpreduce_add_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
315; CHECK-LABEL: vpreduce_add_v2i1:
316; CHECK:       # %bb.0:
317; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
318; CHECK-NEXT:    vmv1r.v v9, v0
319; CHECK-NEXT:    vmv1r.v v0, v8
320; CHECK-NEXT:    vcpop.m a1, v9, v0.t
321; CHECK-NEXT:    andi a1, a1, 1
322; CHECK-NEXT:    xor a0, a1, a0
323; CHECK-NEXT:    ret
324  %r = call i1 @llvm.vp.reduce.add.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
325  ret i1 %r
326}
327
328declare i1 @llvm.vp.reduce.add.v4i1(i1, <4 x i1>, <4 x i1>, i32)
329
330define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
331; CHECK-LABEL: vpreduce_add_v4i1:
332; CHECK:       # %bb.0:
333; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
334; CHECK-NEXT:    vmv1r.v v9, v0
335; CHECK-NEXT:    vmv1r.v v0, v8
336; CHECK-NEXT:    vcpop.m a1, v9, v0.t
337; CHECK-NEXT:    andi a1, a1, 1
338; CHECK-NEXT:    xor a0, a1, a0
339; CHECK-NEXT:    ret
340  %r = call i1 @llvm.vp.reduce.add.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
341  ret i1 %r
342}
343
344declare i1 @llvm.vp.reduce.add.v8i1(i1, <8 x i1>, <8 x i1>, i32)
345
346define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
347; CHECK-LABEL: vpreduce_add_v8i1:
348; CHECK:       # %bb.0:
349; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
350; CHECK-NEXT:    vmv1r.v v9, v0
351; CHECK-NEXT:    vmv1r.v v0, v8
352; CHECK-NEXT:    vcpop.m a1, v9, v0.t
353; CHECK-NEXT:    andi a1, a1, 1
354; CHECK-NEXT:    xor a0, a1, a0
355; CHECK-NEXT:    ret
356  %r = call i1 @llvm.vp.reduce.add.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
357  ret i1 %r
358}
359
360declare i1 @llvm.vp.reduce.add.v16i1(i1, <16 x i1>, <16 x i1>, i32)
361
362define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
363; CHECK-LABEL: vpreduce_add_v16i1:
364; CHECK:       # %bb.0:
365; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
366; CHECK-NEXT:    vmv1r.v v9, v0
367; CHECK-NEXT:    vmv1r.v v0, v8
368; CHECK-NEXT:    vcpop.m a1, v9, v0.t
369; CHECK-NEXT:    andi a1, a1, 1
370; CHECK-NEXT:    xor a0, a1, a0
371; CHECK-NEXT:    ret
372  %r = call i1 @llvm.vp.reduce.add.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
373  ret i1 %r
374}
375
; [review] smax VP reductions over i1 vectors: the checks match the
; and-reduction sequence (vmnot.m + masked vcpop + seqz + and) — consistent
; with signed i1 ordering where true (-1) < false (0), making smax = and.
; CHECK lines are autogenerated — do not edit by hand.
376declare i1 @llvm.vp.reduce.smax.v1i1(i1, <1 x i1>, <1 x i1>, i32)
377
378define zeroext i1 @vpreduce_smax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
379; CHECK-LABEL: vpreduce_smax_v1i1:
380; CHECK:       # %bb.0:
381; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
382; CHECK-NEXT:    vmnot.m v9, v0
383; CHECK-NEXT:    vmv1r.v v0, v8
384; CHECK-NEXT:    vcpop.m a1, v9, v0.t
385; CHECK-NEXT:    seqz a1, a1
386; CHECK-NEXT:    and a0, a1, a0
387; CHECK-NEXT:    ret
388  %r = call i1 @llvm.vp.reduce.smax.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
389  ret i1 %r
390}
391
392declare i1 @llvm.vp.reduce.smax.v2i1(i1, <2 x i1>, <2 x i1>, i32)
393
394define zeroext i1 @vpreduce_smax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
395; CHECK-LABEL: vpreduce_smax_v2i1:
396; CHECK:       # %bb.0:
397; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
398; CHECK-NEXT:    vmnot.m v9, v0
399; CHECK-NEXT:    vmv1r.v v0, v8
400; CHECK-NEXT:    vcpop.m a1, v9, v0.t
401; CHECK-NEXT:    seqz a1, a1
402; CHECK-NEXT:    and a0, a1, a0
403; CHECK-NEXT:    ret
404  %r = call i1 @llvm.vp.reduce.smax.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
405  ret i1 %r
406}
407
408declare i1 @llvm.vp.reduce.smax.v4i1(i1, <4 x i1>, <4 x i1>, i32)
409
410define zeroext i1 @vpreduce_smax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
411; CHECK-LABEL: vpreduce_smax_v4i1:
412; CHECK:       # %bb.0:
413; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
414; CHECK-NEXT:    vmnot.m v9, v0
415; CHECK-NEXT:    vmv1r.v v0, v8
416; CHECK-NEXT:    vcpop.m a1, v9, v0.t
417; CHECK-NEXT:    seqz a1, a1
418; CHECK-NEXT:    and a0, a1, a0
419; CHECK-NEXT:    ret
420  %r = call i1 @llvm.vp.reduce.smax.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
421  ret i1 %r
422}
423
424declare i1 @llvm.vp.reduce.smax.v8i1(i1, <8 x i1>, <8 x i1>, i32)
425
426define zeroext i1 @vpreduce_smax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
427; CHECK-LABEL: vpreduce_smax_v8i1:
428; CHECK:       # %bb.0:
429; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
430; CHECK-NEXT:    vmnot.m v9, v0
431; CHECK-NEXT:    vmv1r.v v0, v8
432; CHECK-NEXT:    vcpop.m a1, v9, v0.t
433; CHECK-NEXT:    seqz a1, a1
434; CHECK-NEXT:    and a0, a1, a0
435; CHECK-NEXT:    ret
436  %r = call i1 @llvm.vp.reduce.smax.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
437  ret i1 %r
438}
439
440declare i1 @llvm.vp.reduce.smax.v16i1(i1, <16 x i1>, <16 x i1>, i32)
441
442define zeroext i1 @vpreduce_smax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
443; CHECK-LABEL: vpreduce_smax_v16i1:
444; CHECK:       # %bb.0:
445; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
446; CHECK-NEXT:    vmnot.m v9, v0
447; CHECK-NEXT:    vmv1r.v v0, v8
448; CHECK-NEXT:    vcpop.m a1, v9, v0.t
449; CHECK-NEXT:    seqz a1, a1
450; CHECK-NEXT:    and a0, a1, a0
451; CHECK-NEXT:    ret
452  %r = call i1 @llvm.vp.reduce.smax.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
453  ret i1 %r
454}
455
456declare i1 @llvm.vp.reduce.smax.v32i1(i1, <32 x i1>, <32 x i1>, i32)
457
458define zeroext i1 @vpreduce_smax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
459; CHECK-LABEL: vpreduce_smax_v32i1:
460; CHECK:       # %bb.0:
461; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
462; CHECK-NEXT:    vmnot.m v9, v0
463; CHECK-NEXT:    vmv1r.v v0, v8
464; CHECK-NEXT:    vcpop.m a1, v9, v0.t
465; CHECK-NEXT:    seqz a1, a1
466; CHECK-NEXT:    and a0, a1, a0
467; CHECK-NEXT:    ret
468  %r = call i1 @llvm.vp.reduce.smax.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
469  ret i1 %r
470}
471
472declare i1 @llvm.vp.reduce.smax.v64i1(i1, <64 x i1>, <64 x i1>, i32)
473
474define zeroext i1 @vpreduce_smax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
475; CHECK-LABEL: vpreduce_smax_v64i1:
476; CHECK:       # %bb.0:
477; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
478; CHECK-NEXT:    vmnot.m v9, v0
479; CHECK-NEXT:    vmv1r.v v0, v8
480; CHECK-NEXT:    vcpop.m a1, v9, v0.t
481; CHECK-NEXT:    seqz a1, a1
482; CHECK-NEXT:    and a0, a1, a0
483; CHECK-NEXT:    ret
484  %r = call i1 @llvm.vp.reduce.smax.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
485  ret i1 %r
486}
487
; [review] smin VP reductions over i1 vectors: the checks match the
; or-reduction sequence (masked vcpop + snez + or) — consistent with signed
; i1 ordering where true (-1) < false (0), making smin = or.
; CHECK lines are autogenerated — do not edit by hand.
488declare i1 @llvm.vp.reduce.smin.v1i1(i1, <1 x i1>, <1 x i1>, i32)
489
490define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
491; CHECK-LABEL: vpreduce_smin_v1i1:
492; CHECK:       # %bb.0:
493; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
494; CHECK-NEXT:    vmv1r.v v9, v0
495; CHECK-NEXT:    vmv1r.v v0, v8
496; CHECK-NEXT:    vcpop.m a1, v9, v0.t
497; CHECK-NEXT:    snez a1, a1
498; CHECK-NEXT:    or a0, a1, a0
499; CHECK-NEXT:    ret
500  %r = call i1 @llvm.vp.reduce.smin.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
501  ret i1 %r
502}
503
504declare i1 @llvm.vp.reduce.smin.v2i1(i1, <2 x i1>, <2 x i1>, i32)
505
506define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
507; CHECK-LABEL: vpreduce_smin_v2i1:
508; CHECK:       # %bb.0:
509; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
510; CHECK-NEXT:    vmv1r.v v9, v0
511; CHECK-NEXT:    vmv1r.v v0, v8
512; CHECK-NEXT:    vcpop.m a1, v9, v0.t
513; CHECK-NEXT:    snez a1, a1
514; CHECK-NEXT:    or a0, a1, a0
515; CHECK-NEXT:    ret
516  %r = call i1 @llvm.vp.reduce.smin.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
517  ret i1 %r
518}
519
520declare i1 @llvm.vp.reduce.smin.v4i1(i1, <4 x i1>, <4 x i1>, i32)
521
522define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
523; CHECK-LABEL: vpreduce_smin_v4i1:
524; CHECK:       # %bb.0:
525; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
526; CHECK-NEXT:    vmv1r.v v9, v0
527; CHECK-NEXT:    vmv1r.v v0, v8
528; CHECK-NEXT:    vcpop.m a1, v9, v0.t
529; CHECK-NEXT:    snez a1, a1
530; CHECK-NEXT:    or a0, a1, a0
531; CHECK-NEXT:    ret
532  %r = call i1 @llvm.vp.reduce.smin.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
533  ret i1 %r
534}
535
536declare i1 @llvm.vp.reduce.smin.v8i1(i1, <8 x i1>, <8 x i1>, i32)
537
538define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
539; CHECK-LABEL: vpreduce_smin_v8i1:
540; CHECK:       # %bb.0:
541; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
542; CHECK-NEXT:    vmv1r.v v9, v0
543; CHECK-NEXT:    vmv1r.v v0, v8
544; CHECK-NEXT:    vcpop.m a1, v9, v0.t
545; CHECK-NEXT:    snez a1, a1
546; CHECK-NEXT:    or a0, a1, a0
547; CHECK-NEXT:    ret
548  %r = call i1 @llvm.vp.reduce.smin.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
549  ret i1 %r
550}
551
552declare i1 @llvm.vp.reduce.smin.v16i1(i1, <16 x i1>, <16 x i1>, i32)
553
554define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
555; CHECK-LABEL: vpreduce_smin_v16i1:
556; CHECK:       # %bb.0:
557; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
558; CHECK-NEXT:    vmv1r.v v9, v0
559; CHECK-NEXT:    vmv1r.v v0, v8
560; CHECK-NEXT:    vcpop.m a1, v9, v0.t
561; CHECK-NEXT:    snez a1, a1
562; CHECK-NEXT:    or a0, a1, a0
563; CHECK-NEXT:    ret
564  %r = call i1 @llvm.vp.reduce.smin.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
565  ret i1 %r
566}
567
568declare i1 @llvm.vp.reduce.smin.v32i1(i1, <32 x i1>, <32 x i1>, i32)
569
570define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
571; CHECK-LABEL: vpreduce_smin_v32i1:
572; CHECK:       # %bb.0:
573; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
574; CHECK-NEXT:    vmv1r.v v9, v0
575; CHECK-NEXT:    vmv1r.v v0, v8
576; CHECK-NEXT:    vcpop.m a1, v9, v0.t
577; CHECK-NEXT:    snez a1, a1
578; CHECK-NEXT:    or a0, a1, a0
579; CHECK-NEXT:    ret
580  %r = call i1 @llvm.vp.reduce.smin.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
581  ret i1 %r
582}
583
584declare i1 @llvm.vp.reduce.smin.v64i1(i1, <64 x i1>, <64 x i1>, i32)
585
586define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
587; CHECK-LABEL: vpreduce_smin_v64i1:
588; CHECK:       # %bb.0:
589; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
590; CHECK-NEXT:    vmv1r.v v9, v0
591; CHECK-NEXT:    vmv1r.v v0, v8
592; CHECK-NEXT:    vcpop.m a1, v9, v0.t
593; CHECK-NEXT:    snez a1, a1
594; CHECK-NEXT:    or a0, a1, a0
595; CHECK-NEXT:    ret
596  %r = call i1 @llvm.vp.reduce.smin.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
597  ret i1 %r
598}
599
; [review] umax VP reductions over i1 vectors: unsigned max of booleans is
; logical or, and the checks indeed match the or-reduction sequence
; (masked vcpop + snez + or). CHECK lines are autogenerated.
600declare i1 @llvm.vp.reduce.umax.v1i1(i1, <1 x i1>, <1 x i1>, i32)
601
602define zeroext i1 @vpreduce_umax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
603; CHECK-LABEL: vpreduce_umax_v1i1:
604; CHECK:       # %bb.0:
605; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
606; CHECK-NEXT:    vmv1r.v v9, v0
607; CHECK-NEXT:    vmv1r.v v0, v8
608; CHECK-NEXT:    vcpop.m a1, v9, v0.t
609; CHECK-NEXT:    snez a1, a1
610; CHECK-NEXT:    or a0, a1, a0
611; CHECK-NEXT:    ret
612  %r = call i1 @llvm.vp.reduce.umax.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
613  ret i1 %r
614}
615
616declare i1 @llvm.vp.reduce.umax.v2i1(i1, <2 x i1>, <2 x i1>, i32)
617
618define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
619; CHECK-LABEL: vpreduce_umax_v2i1:
620; CHECK:       # %bb.0:
621; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
622; CHECK-NEXT:    vmv1r.v v9, v0
623; CHECK-NEXT:    vmv1r.v v0, v8
624; CHECK-NEXT:    vcpop.m a1, v9, v0.t
625; CHECK-NEXT:    snez a1, a1
626; CHECK-NEXT:    or a0, a1, a0
627; CHECK-NEXT:    ret
628  %r = call i1 @llvm.vp.reduce.umax.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
629  ret i1 %r
630}
631
632declare i1 @llvm.vp.reduce.umax.v4i1(i1, <4 x i1>, <4 x i1>, i32)
633
634define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
635; CHECK-LABEL: vpreduce_umax_v4i1:
636; CHECK:       # %bb.0:
637; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
638; CHECK-NEXT:    vmv1r.v v9, v0
639; CHECK-NEXT:    vmv1r.v v0, v8
640; CHECK-NEXT:    vcpop.m a1, v9, v0.t
641; CHECK-NEXT:    snez a1, a1
642; CHECK-NEXT:    or a0, a1, a0
643; CHECK-NEXT:    ret
644  %r = call i1 @llvm.vp.reduce.umax.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
645  ret i1 %r
646}
647
648declare i1 @llvm.vp.reduce.umax.v8i1(i1, <8 x i1>, <8 x i1>, i32)
649
650define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
651; CHECK-LABEL: vpreduce_umax_v8i1:
652; CHECK:       # %bb.0:
653; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
654; CHECK-NEXT:    vmv1r.v v9, v0
655; CHECK-NEXT:    vmv1r.v v0, v8
656; CHECK-NEXT:    vcpop.m a1, v9, v0.t
657; CHECK-NEXT:    snez a1, a1
658; CHECK-NEXT:    or a0, a1, a0
659; CHECK-NEXT:    ret
660  %r = call i1 @llvm.vp.reduce.umax.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
661  ret i1 %r
662}
663
664declare i1 @llvm.vp.reduce.umax.v16i1(i1, <16 x i1>, <16 x i1>, i32)
665
666define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
667; CHECK-LABEL: vpreduce_umax_v16i1:
668; CHECK:       # %bb.0:
669; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
670; CHECK-NEXT:    vmv1r.v v9, v0
671; CHECK-NEXT:    vmv1r.v v0, v8
672; CHECK-NEXT:    vcpop.m a1, v9, v0.t
673; CHECK-NEXT:    snez a1, a1
674; CHECK-NEXT:    or a0, a1, a0
675; CHECK-NEXT:    ret
676  %r = call i1 @llvm.vp.reduce.umax.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
677  ret i1 %r
678}
679
680declare i1 @llvm.vp.reduce.umax.v32i1(i1, <32 x i1>, <32 x i1>, i32)
681
682define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
683; CHECK-LABEL: vpreduce_umax_v32i1:
684; CHECK:       # %bb.0:
685; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
686; CHECK-NEXT:    vmv1r.v v9, v0
687; CHECK-NEXT:    vmv1r.v v0, v8
688; CHECK-NEXT:    vcpop.m a1, v9, v0.t
689; CHECK-NEXT:    snez a1, a1
690; CHECK-NEXT:    or a0, a1, a0
691; CHECK-NEXT:    ret
692  %r = call i1 @llvm.vp.reduce.umax.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
693  ret i1 %r
694}
695
696declare i1 @llvm.vp.reduce.umax.v64i1(i1, <64 x i1>, <64 x i1>, i32)
697
698define zeroext i1 @vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
699; CHECK-LABEL: vpreduce_umax_v64i1:
700; CHECK:       # %bb.0:
701; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
702; CHECK-NEXT:    vmv1r.v v9, v0
703; CHECK-NEXT:    vmv1r.v v0, v8
704; CHECK-NEXT:    vcpop.m a1, v9, v0.t
705; CHECK-NEXT:    snez a1, a1
706; CHECK-NEXT:    or a0, a1, a0
707; CHECK-NEXT:    ret
708  %r = call i1 @llvm.vp.reduce.umax.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
709  ret i1 %r
710}
711
; [review] umin VP reductions over i1 vectors: unsigned min of booleans is
; logical and, and the checks indeed match the and-reduction sequence
; (vmnot.m + masked vcpop + seqz + and). CHECK lines are autogenerated.
712declare i1 @llvm.vp.reduce.umin.v1i1(i1, <1 x i1>, <1 x i1>, i32)
713
714define zeroext i1 @vpreduce_umin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
715; CHECK-LABEL: vpreduce_umin_v1i1:
716; CHECK:       # %bb.0:
717; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
718; CHECK-NEXT:    vmnot.m v9, v0
719; CHECK-NEXT:    vmv1r.v v0, v8
720; CHECK-NEXT:    vcpop.m a1, v9, v0.t
721; CHECK-NEXT:    seqz a1, a1
722; CHECK-NEXT:    and a0, a1, a0
723; CHECK-NEXT:    ret
724  %r = call i1 @llvm.vp.reduce.umin.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
725  ret i1 %r
726}
727
728declare i1 @llvm.vp.reduce.umin.v2i1(i1, <2 x i1>, <2 x i1>, i32)
729
730define zeroext i1 @vpreduce_umin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
731; CHECK-LABEL: vpreduce_umin_v2i1:
732; CHECK:       # %bb.0:
733; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
734; CHECK-NEXT:    vmnot.m v9, v0
735; CHECK-NEXT:    vmv1r.v v0, v8
736; CHECK-NEXT:    vcpop.m a1, v9, v0.t
737; CHECK-NEXT:    seqz a1, a1
738; CHECK-NEXT:    and a0, a1, a0
739; CHECK-NEXT:    ret
740  %r = call i1 @llvm.vp.reduce.umin.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
741  ret i1 %r
742}
743
744declare i1 @llvm.vp.reduce.umin.v4i1(i1, <4 x i1>, <4 x i1>, i32)
745
746define zeroext i1 @vpreduce_umin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
747; CHECK-LABEL: vpreduce_umin_v4i1:
748; CHECK:       # %bb.0:
749; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
750; CHECK-NEXT:    vmnot.m v9, v0
751; CHECK-NEXT:    vmv1r.v v0, v8
752; CHECK-NEXT:    vcpop.m a1, v9, v0.t
753; CHECK-NEXT:    seqz a1, a1
754; CHECK-NEXT:    and a0, a1, a0
755; CHECK-NEXT:    ret
756  %r = call i1 @llvm.vp.reduce.umin.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
757  ret i1 %r
758}
759
760declare i1 @llvm.vp.reduce.umin.v8i1(i1, <8 x i1>, <8 x i1>, i32)
761
762define zeroext i1 @vpreduce_umin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
763; CHECK-LABEL: vpreduce_umin_v8i1:
764; CHECK:       # %bb.0:
765; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
766; CHECK-NEXT:    vmnot.m v9, v0
767; CHECK-NEXT:    vmv1r.v v0, v8
768; CHECK-NEXT:    vcpop.m a1, v9, v0.t
769; CHECK-NEXT:    seqz a1, a1
770; CHECK-NEXT:    and a0, a1, a0
771; CHECK-NEXT:    ret
772  %r = call i1 @llvm.vp.reduce.umin.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
773  ret i1 %r
774}
775
776declare i1 @llvm.vp.reduce.umin.v16i1(i1, <16 x i1>, <16 x i1>, i32)
777
778define zeroext i1 @vpreduce_umin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
779; CHECK-LABEL: vpreduce_umin_v16i1:
780; CHECK:       # %bb.0:
781; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
782; CHECK-NEXT:    vmnot.m v9, v0
783; CHECK-NEXT:    vmv1r.v v0, v8
784; CHECK-NEXT:    vcpop.m a1, v9, v0.t
785; CHECK-NEXT:    seqz a1, a1
786; CHECK-NEXT:    and a0, a1, a0
787; CHECK-NEXT:    ret
788  %r = call i1 @llvm.vp.reduce.umin.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
789  ret i1 %r
790}
791
792declare i1 @llvm.vp.reduce.umin.v32i1(i1, <32 x i1>, <32 x i1>, i32)
793
794define zeroext i1 @vpreduce_umin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
795; CHECK-LABEL: vpreduce_umin_v32i1:
796; CHECK:       # %bb.0:
797; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
798; CHECK-NEXT:    vmnot.m v9, v0
799; CHECK-NEXT:    vmv1r.v v0, v8
800; CHECK-NEXT:    vcpop.m a1, v9, v0.t
801; CHECK-NEXT:    seqz a1, a1
802; CHECK-NEXT:    and a0, a1, a0
803; CHECK-NEXT:    ret
804  %r = call i1 @llvm.vp.reduce.umin.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
805  ret i1 %r
806}
807
808declare i1 @llvm.vp.reduce.umin.v64i1(i1, <64 x i1>, <64 x i1>, i32)
809
810define zeroext i1 @vpreduce_umin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
811; CHECK-LABEL: vpreduce_umin_v64i1:
812; CHECK:       # %bb.0:
813; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
814; CHECK-NEXT:    vmnot.m v9, v0
815; CHECK-NEXT:    vmv1r.v v0, v8
816; CHECK-NEXT:    vcpop.m a1, v9, v0.t
817; CHECK-NEXT:    seqz a1, a1
818; CHECK-NEXT:    and a0, a1, a0
819; CHECK-NEXT:    ret
820  %r = call i1 @llvm.vp.reduce.umin.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
821  ret i1 %r
822}
823
; [review] mul VP reductions over i1 vectors: i1 multiplication is logical
; and, and the checks match the and-reduction sequence (vmnot.m + masked
; vcpop + seqz + and). NOTE(review): vpreduce_mul_v1i1 is the only function
; here declared without zeroext on the return/%s — presumably deliberate
; coverage of the non-zeroext calling convention; confirm before changing.
; CHECK lines are autogenerated — do not edit by hand.
824declare i1 @llvm.vp.reduce.mul.v1i1(i1, <1 x i1>, <1 x i1>, i32)
825
826define i1 @vpreduce_mul_v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
827; CHECK-LABEL: vpreduce_mul_v1i1:
828; CHECK:       # %bb.0:
829; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
830; CHECK-NEXT:    vmnot.m v9, v0
831; CHECK-NEXT:    vmv1r.v v0, v8
832; CHECK-NEXT:    vcpop.m a1, v9, v0.t
833; CHECK-NEXT:    seqz a1, a1
834; CHECK-NEXT:    and a0, a1, a0
835; CHECK-NEXT:    ret
836  %r = call i1 @llvm.vp.reduce.mul.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
837  ret i1 %r
838}
839
840declare i1 @llvm.vp.reduce.mul.v2i1(i1, <2 x i1>, <2 x i1>, i32)
841
842define zeroext i1 @vpreduce_mul_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
843; CHECK-LABEL: vpreduce_mul_v2i1:
844; CHECK:       # %bb.0:
845; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
846; CHECK-NEXT:    vmnot.m v9, v0
847; CHECK-NEXT:    vmv1r.v v0, v8
848; CHECK-NEXT:    vcpop.m a1, v9, v0.t
849; CHECK-NEXT:    seqz a1, a1
850; CHECK-NEXT:    and a0, a1, a0
851; CHECK-NEXT:    ret
852  %r = call i1 @llvm.vp.reduce.mul.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
853  ret i1 %r
854}
855
856declare i1 @llvm.vp.reduce.mul.v4i1(i1, <4 x i1>, <4 x i1>, i32)
857
858define zeroext i1 @vpreduce_mul_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
859; CHECK-LABEL: vpreduce_mul_v4i1:
860; CHECK:       # %bb.0:
861; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
862; CHECK-NEXT:    vmnot.m v9, v0
863; CHECK-NEXT:    vmv1r.v v0, v8
864; CHECK-NEXT:    vcpop.m a1, v9, v0.t
865; CHECK-NEXT:    seqz a1, a1
866; CHECK-NEXT:    and a0, a1, a0
867; CHECK-NEXT:    ret
868  %r = call i1 @llvm.vp.reduce.mul.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
869  ret i1 %r
870}
871
872declare i1 @llvm.vp.reduce.mul.v8i1(i1, <8 x i1>, <8 x i1>, i32)
873
874define zeroext i1 @vpreduce_mul_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
875; CHECK-LABEL: vpreduce_mul_v8i1:
876; CHECK:       # %bb.0:
877; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
878; CHECK-NEXT:    vmnot.m v9, v0
879; CHECK-NEXT:    vmv1r.v v0, v8
880; CHECK-NEXT:    vcpop.m a1, v9, v0.t
881; CHECK-NEXT:    seqz a1, a1
882; CHECK-NEXT:    and a0, a1, a0
883; CHECK-NEXT:    ret
884  %r = call i1 @llvm.vp.reduce.mul.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
885  ret i1 %r
886}
887
888declare i1 @llvm.vp.reduce.mul.v16i1(i1, <16 x i1>, <16 x i1>, i32)
889
890define zeroext i1 @vpreduce_mul_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
891; CHECK-LABEL: vpreduce_mul_v16i1:
892; CHECK:       # %bb.0:
893; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
894; CHECK-NEXT:    vmnot.m v9, v0
895; CHECK-NEXT:    vmv1r.v v0, v8
896; CHECK-NEXT:    vcpop.m a1, v9, v0.t
897; CHECK-NEXT:    seqz a1, a1
898; CHECK-NEXT:    and a0, a1, a0
899; CHECK-NEXT:    ret
900  %r = call i1 @llvm.vp.reduce.mul.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
901  ret i1 %r
902}
903
904declare i1 @llvm.vp.reduce.mul.v32i1(i1, <32 x i1>, <32 x i1>, i32)
905
906define zeroext i1 @vpreduce_mul_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
907; CHECK-LABEL: vpreduce_mul_v32i1:
908; CHECK:       # %bb.0:
909; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
910; CHECK-NEXT:    vmnot.m v9, v0
911; CHECK-NEXT:    vmv1r.v v0, v8
912; CHECK-NEXT:    vcpop.m a1, v9, v0.t
913; CHECK-NEXT:    seqz a1, a1
914; CHECK-NEXT:    and a0, a1, a0
915; CHECK-NEXT:    ret
916  %r = call i1 @llvm.vp.reduce.mul.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
917  ret i1 %r
918}
919
920declare i1 @llvm.vp.reduce.mul.v64i1(i1, <64 x i1>, <64 x i1>, i32)
921
922define zeroext i1 @vpreduce_mul_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
923; CHECK-LABEL: vpreduce_mul_v64i1:
924; CHECK:       # %bb.0:
925; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
926; CHECK-NEXT:    vmnot.m v9, v0
927; CHECK-NEXT:    vmv1r.v v0, v8
928; CHECK-NEXT:    vcpop.m a1, v9, v0.t
929; CHECK-NEXT:    seqz a1, a1
930; CHECK-NEXT:    and a0, a1, a0
931; CHECK-NEXT:    ret
932  %r = call i1 @llvm.vp.reduce.mul.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
933  ret i1 %r
934}
935