; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

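; Check that an add reduction of an i1 predicate vector that has been
; zero-extended to a wider element type lowers to a single CNTP instruction,
; which counts the active predicate elements directly.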
define i8 @uaddv_zexti8_nxv16i1(<vscale x 16 x i1> %v) {
; CHECK-LABEL: uaddv_zexti8_nxv16i1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cntp x0, p0, p0.b
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
entry:
  %3 = zext <vscale x 16 x i1> %v to <vscale x 16 x i8>
  %4 = tail call i8 @llvm.vector.reduce.add.nxv16i8(<vscale x 16 x i8> %3)
  ret i8 %4
}

define i8 @uaddv_zexti8_nxv8i1(<vscale x 8 x i1> %v) {
; CHECK-LABEL: uaddv_zexti8_nxv8i1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cntp x0, p0, p0.h
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
entry:
  %3 = zext <vscale x 8 x i1> %v to <vscale x 8 x i8>
  %4 = tail call i8 @llvm.vector.reduce.add.nxv8i8(<vscale x 8 x i8> %3)
  ret i8 %4
}

define i16 @uaddv_zexti16_nxv8i1(<vscale x 8 x i1> %v) {
; CHECK-LABEL: uaddv_zexti16_nxv8i1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cntp x0, p0, p0.h
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
entry:
  %3 = zext <vscale x 8 x i1> %v to <vscale x 8 x i16>
  %4 = tail call i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16> %3)
  ret i16 %4
}

define i8 @uaddv_zexti8_nxv4i1(<vscale x 4 x i1> %v) {
; CHECK-LABEL: uaddv_zexti8_nxv4i1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cntp x0, p0, p0.s
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
entry:
  %3 = zext <vscale x 4 x i1> %v to <vscale x 4 x i8>
  %4 = tail call i8 @llvm.vector.reduce.add.nxv4i8(<vscale x 4 x i8> %3)
  ret i8 %4
}

define i16 @uaddv_zexti16_nxv4i1(<vscale x 4 x i1> %v) {
; CHECK-LABEL: uaddv_zexti16_nxv4i1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cntp x0, p0, p0.s
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
entry:
  %3 = zext <vscale x 4 x i1> %v to <vscale x 4 x i16>
  %4 = tail call i16 @llvm.vector.reduce.add.nxv4i16(<vscale x 4 x i16> %3)
  ret i16 %4
}

define i32 @uaddv_zexti32_nxv4i1(<vscale x 4 x i1> %v) {
; CHECK-LABEL: uaddv_zexti32_nxv4i1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cntp x0, p0, p0.s
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
entry:
  %3 = zext <vscale x 4 x i1> %v to <vscale x 4 x i32>
  %4 = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %3)
  ret i32 %4
}

define i8 @uaddv_zexti8_nxv2i1(<vscale x 2 x i1> %v) {
; CHECK-LABEL: uaddv_zexti8_nxv2i1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cntp x0, p0, p0.d
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
entry:
  %3 = zext <vscale x 2 x i1> %v to <vscale x 2 x i8>
  %4 = tail call i8 @llvm.vector.reduce.add.nxv2i8(<vscale x 2 x i8> %3)
  ret i8 %4
}

define i16 @uaddv_zexti16_nxv2i1(<vscale x 2 x i1> %v) {
; CHECK-LABEL: uaddv_zexti16_nxv2i1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cntp x0, p0, p0.d
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
entry:
  %3 = zext <vscale x 2 x i1> %v to <vscale x 2 x i16>
  %4 = tail call i16 @llvm.vector.reduce.add.nxv2i16(<vscale x 2 x i16> %3)
  ret i16 %4
}

define i32 @uaddv_zexti32_nxv2i1(<vscale x 2 x i1> %v) {
; CHECK-LABEL: uaddv_zexti32_nxv2i1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cntp x0, p0, p0.d
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
entry:
  %3 = zext <vscale x 2 x i1> %v to <vscale x 2 x i32>
  %4 = tail call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> %3)
  ret i32 %4
}

define i64 @uaddv_zexti64_nxv2i1(<vscale x 2 x i1> %v) {
; CHECK-LABEL: uaddv_zexti64_nxv2i1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cntp x0, p0, p0.d
; CHECK-NEXT:    ret
entry:
  %3 = zext <vscale x 2 x i1> %v to <vscale x 2 x i64>
  %4 = tail call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %3)
  ret i64 %4
}

declare i8 @llvm.vector.reduce.add.nxv16i8(<vscale x 16 x i8>)
declare i8 @llvm.vector.reduce.add.nxv8i8(<vscale x 8 x i8>)
declare i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16>)
declare i8 @llvm.vector.reduce.add.nxv4i8(<vscale x 4 x i8>)
declare i16 @llvm.vector.reduce.add.nxv4i16(<vscale x 4 x i16>)
declare i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32>)
declare i8 @llvm.vector.reduce.add.nxv2i8(<vscale x 2 x i8>)
declare i16 @llvm.vector.reduce.add.nxv2i16(<vscale x 2 x i16>)
declare i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32>)
declare i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64>)