; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

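; Check that each scalable-vector integer reduction intrinsic lowers to a single
; predicated SVE reduction instruction over the whole vector.
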
; ANDV

define i8 @andv_nxv16i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: andv_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    andv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.and.nxv16i8(<vscale x 16 x i8> %a)
  ret i8 %res
}

define i16 @andv_nxv8i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: andv_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    andv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i16 @llvm.vector.reduce.and.nxv8i16(<vscale x 8 x i16> %a)
  ret i16 %res
}

define i32 @andv_nxv4i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: andv_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    andv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> %a)
  ret i32 %res
}

define i64 @andv_nxv2i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: andv_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    andv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %res = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> %a)
  ret i64 %res
}

; ORV

define i8 @orv_nxv16i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: orv_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    orv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.or.nxv16i8(<vscale x 16 x i8> %a)
  ret i8 %res
}

define i16 @orv_nxv8i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: orv_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    orv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i16 @llvm.vector.reduce.or.nxv8i16(<vscale x 8 x i16> %a)
  ret i16 %res
}

define i32 @orv_nxv4i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: orv_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    orv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> %a)
  ret i32 %res
}

define i64 @orv_nxv2i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: orv_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    orv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %res = call i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64> %a)
  ret i64 %res
}

; XORV
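; XOR reductions lower to the SVE EORV instruction.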

define i8 @xorv_nxv16i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: xorv_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    eorv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.xor.nxv16i8(<vscale x 16 x i8> %a)
  ret i8 %res
}

define i16 @xorv_nxv8i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: xorv_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    eorv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i16 @llvm.vector.reduce.xor.nxv8i16(<vscale x 8 x i16> %a)
  ret i16 %res
}

define i32 @xorv_nxv4i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: xorv_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    eorv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> %a)
  ret i32 %res
}

define i64 @xorv_nxv2i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: xorv_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    eorv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %res = call i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64> %a)
  ret i64 %res
}

; UADDV
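; UADDV always produces a 64-bit scalar result, so the destination is d0 for
; every element type; narrower results are read from the low bits.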

define i8 @uaddv_nxv16i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: uaddv_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    uaddv d0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.add.nxv16i8(<vscale x 16 x i8> %a)
  ret i8 %res
}

define i16 @uaddv_nxv8i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uaddv_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    uaddv d0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16> %a)
  ret i16 %res
}

define i32 @uaddv_nxv4i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uaddv_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uaddv d0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %a)
  ret i32 %res
}

define i64 @uaddv_nxv2i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uaddv_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uaddv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %res = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %a)
  ret i64 %res
}

; UMINV

define i8 @umin_nxv16i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: umin_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    uminv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.umin.nxv16i8(<vscale x 16 x i8> %a)
  ret i8 %res
}

define i16 @umin_nxv8i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: umin_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    uminv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i16 @llvm.vector.reduce.umin.nxv8i16(<vscale x 8 x i16> %a)
  ret i16 %res
}

define i32 @umin_nxv4i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: umin_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uminv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> %a)
  ret i32 %res
}

define i64 @umin_nxv2i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: umin_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uminv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %res = call i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64> %a)
  ret i64 %res
}

; SMINV

define i8 @smin_nxv16i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: smin_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    sminv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.smin.nxv16i8(<vscale x 16 x i8> %a)
  ret i8 %res
}

define i16 @smin_nxv8i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: smin_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    sminv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i16 @llvm.vector.reduce.smin.nxv8i16(<vscale x 8 x i16> %a)
  ret i16 %res
}

define i32 @smin_nxv4i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: smin_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sminv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> %a)
  ret i32 %res
}

define i64 @smin_nxv2i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: smin_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sminv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %res = call i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64> %a)
  ret i64 %res
}

; UMAXV

define i8 @umax_nxv16i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: umax_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    umaxv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.umax.nxv16i8(<vscale x 16 x i8> %a)
  ret i8 %res
}

define i16 @umax_nxv8i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: umax_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    umaxv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i16 @llvm.vector.reduce.umax.nxv8i16(<vscale x 8 x i16> %a)
  ret i16 %res
}

define i32 @umax_nxv4i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: umax_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    umaxv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> %a)
  ret i32 %res
}

define i64 @umax_nxv2i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: umax_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    umaxv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %res = call i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64> %a)
  ret i64 %res
}

; SMAXV

define i8 @smax_nxv16i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: smax_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    smaxv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.smax.nxv16i8(<vscale x 16 x i8> %a)
  ret i8 %res
}

define i16 @smax_nxv8i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: smax_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    smaxv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i16 @llvm.vector.reduce.smax.nxv8i16(<vscale x 8 x i16> %a)
  ret i16 %res
}

define i32 @smax_nxv4i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: smax_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    smaxv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> %a)
  ret i32 %res
}

define i64 @smax_nxv2i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: smax_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    smaxv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %res = call i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64> %a)
  ret i64 %res
}

; Test reductions over element counts that must be widened to a legal SVE type.
declare i8 @llvm.vector.reduce.smin.nxv10i8(<vscale x 10 x i8>)

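; The padding lanes introduced by widening are filled with 127, the smin
; identity for i8, so they cannot affect the reduction result.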
define i8 @smin_nxv10i8(<vscale x 10 x i8> %a) {
; CHECK-LABEL: smin_nxv10i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpkhi z1.h, z0.b
; CHECK-NEXT:    mov z3.d, #127 // =0x7f
; CHECK-NEXT:    uunpklo z0.h, z0.b
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    uunpklo z2.s, z1.h
; CHECK-NEXT:    uunpkhi z1.s, z1.h
; CHECK-NEXT:    uunpklo z2.d, z2.s
; CHECK-NEXT:    uzp1 z2.s, z2.s, z3.s
; CHECK-NEXT:    uzp1 z1.h, z2.h, z1.h
; CHECK-NEXT:    uzp1 z1.b, z0.b, z1.b
; CHECK-NEXT:    uunpkhi z1.h, z1.b
; CHECK-NEXT:    uunpkhi z2.s, z1.h
; CHECK-NEXT:    uunpklo z1.s, z1.h
; CHECK-NEXT:    uunpkhi z2.d, z2.s
; CHECK-NEXT:    uzp1 z2.s, z3.s, z2.s
; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
; CHECK-NEXT:    uzp1 z1.b, z0.b, z1.b
; CHECK-NEXT:    uunpkhi z1.h, z1.b
; CHECK-NEXT:    uunpkhi z2.s, z1.h
; CHECK-NEXT:    uunpklo z1.s, z1.h
; CHECK-NEXT:    uunpklo z2.d, z2.s
; CHECK-NEXT:    uzp1 z2.s, z2.s, z3.s
; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
; CHECK-NEXT:    sminv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.smin.nxv10i8(<vscale x 10 x i8> %a)
  ret i8 %res
}

declare i8 @llvm.vector.reduce.add.nxv12i8(<vscale x 12 x i8>)

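; For the add reduction, the widened lanes are zeroed (the add identity).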
define i8 @uaddv_nxv12i8(<vscale x 12 x i8> %a) {
; CHECK-LABEL: uaddv_nxv12i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpkhi z1.h, z0.b
; CHECK-NEXT:    mov z2.s, #0 // =0x0
; CHECK-NEXT:    uunpklo z0.h, z0.b
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    uunpklo z1.s, z1.h
; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
; CHECK-NEXT:    uaddv d0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.add.nxv12i8(<vscale x 12 x i8> %a)
  ret i8 %res
}

declare i8 @llvm.vector.reduce.umax.nxv14i8(<vscale x 14 x i8>)

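; For the unsigned max reduction, the widened lanes are zeroed (the umax identity).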
define i8 @umax_nxv14i8(<vscale x 14 x i8> %a) {
; CHECK-LABEL: umax_nxv14i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpkhi z1.h, z0.b
; CHECK-NEXT:    mov z3.d, #0 // =0x0
; CHECK-NEXT:    uunpklo z0.h, z0.b
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    uunpkhi z2.s, z1.h
; CHECK-NEXT:    uunpklo z1.s, z1.h
; CHECK-NEXT:    uunpklo z2.d, z2.s
; CHECK-NEXT:    uzp1 z2.s, z2.s, z3.s
; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
; CHECK-NEXT:    umaxv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %res = call i8 @llvm.vector.reduce.umax.nxv14i8(<vscale x 14 x i8> %a)
  ret i8 %res
}

declare i8 @llvm.vector.reduce.and.nxv16i8(<vscale x 16 x i8>)
declare i16 @llvm.vector.reduce.and.nxv8i16(<vscale x 8 x i16>)
declare i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32>)
declare i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64>)

declare i8 @llvm.vector.reduce.or.nxv16i8(<vscale x 16 x i8>)
declare i16 @llvm.vector.reduce.or.nxv8i16(<vscale x 8 x i16>)
declare i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32>)
declare i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64>)

declare i8 @llvm.vector.reduce.xor.nxv16i8(<vscale x 16 x i8>)
declare i16 @llvm.vector.reduce.xor.nxv8i16(<vscale x 8 x i16>)
declare i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32>)
declare i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64>)

declare i8 @llvm.vector.reduce.add.nxv16i8(<vscale x 16 x i8>)
declare i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16>)
declare i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32>)
declare i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64>)

declare i8 @llvm.vector.reduce.umin.nxv16i8(<vscale x 16 x i8>)
declare i16 @llvm.vector.reduce.umin.nxv8i16(<vscale x 8 x i16>)
declare i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32>)
declare i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64>)

declare i8 @llvm.vector.reduce.smin.nxv16i8(<vscale x 16 x i8>)
declare i16 @llvm.vector.reduce.smin.nxv8i16(<vscale x 8 x i16>)
declare i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32>)
declare i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64>)

declare i8 @llvm.vector.reduce.umax.nxv16i8(<vscale x 16 x i8>)
declare i16 @llvm.vector.reduce.umax.nxv8i16(<vscale x 8 x i16>)
declare i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32>)
declare i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64>)

declare i8 @llvm.vector.reduce.smax.nxv16i8(<vscale x 16 x i8>)
declare i16 @llvm.vector.reduce.smax.nxv8i16(<vscale x 8 x i16>)
declare i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32>)
declare i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64>)