; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
; RUN: llc -mtriple aarch64-none-linux-gnu -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI

; CHECK-GI:         warning: Instruction selection used fallback path for uaddlv_v8i8_urshr

declare <4 x i16>  @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8>) nounwind readnone
declare <8 x i16>  @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8>) nounwind readnone
declare <4 x i32>  @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16>) nounwind readnone
declare <2 x i64>  @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
declare <2 x i32>  @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16>) nounwind readnone

declare <4 x i16>  @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8>) nounwind readnone
declare <8 x i16>  @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8>) nounwind readnone
declare <4 x i32>  @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16>) nounwind readnone
declare <2 x i64>  @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
declare <2 x i32>  @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16>) nounwind readnone

declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) nounwind readnone
declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) nounwind readnone
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) nounwind readnone
declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) nounwind readnone
declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) nounwind readnone

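; Each uaddlp + vector.reduce.add pair below should fold into a single
; uaddlv across-lanes reduction.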
define i16 @uaddlv4h_from_v8i8(ptr %A) nounwind {
; CHECK-LABEL: uaddlv4h_from_v8i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d0, [x0]
; CHECK-NEXT:    uaddlv h0, v0.8b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <8 x i8>, ptr %A
  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
  %tmp5 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %tmp3)
  ret i16 %tmp5
}

define i16 @uaddlv16b_from_v16i8(ptr %A) nounwind {
; CHECK-LABEL: uaddlv16b_from_v16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    uaddlv h0, v0.16b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <16 x i8>, ptr %A
  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
  %tmp5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tmp3)
  ret i16 %tmp5
}

define i32 @uaddlv8h_from_v8i16(ptr %A) nounwind {
; CHECK-LABEL: uaddlv8h_from_v8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    uaddlv s0, v0.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <8 x i16>, ptr %A
  %tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
  %tmp5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp3)
  ret i32 %tmp5
}

define i64 @uaddlv4s_from_v4i32(ptr %A) nounwind {
; CHECK-LABEL: uaddlv4s_from_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    uaddlv d0, v0.4s
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %tmp1 = load <4 x i32>, ptr %A
  %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
  %tmp5 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %tmp3)
  ret i64 %tmp5
}

define i32 @uaddlv4h_from_v4i16(ptr %A) nounwind {
; CHECK-LABEL: uaddlv4h_from_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d0, [x0]
; CHECK-NEXT:    uaddlv s0, v0.4h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <4 x i16>, ptr %A
  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
  %tmp5 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %tmp3)
  ret i32 %tmp5
}
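; The signed variants should fold the same way, into a single saddlv.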
define i16 @saddlv4h_from_v8i8(ptr %A) nounwind {
; CHECK-LABEL: saddlv4h_from_v8i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d0, [x0]
; CHECK-NEXT:    saddlv h0, v0.8b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <8 x i8>, ptr %A
  %tmp3 = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %tmp1)
  %tmp5 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %tmp3)
  ret i16 %tmp5
}

define i16 @saddlv16b_from_v16i8(ptr %A) nounwind {
; CHECK-LABEL: saddlv16b_from_v16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    saddlv h0, v0.16b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <16 x i8>, ptr %A
  %tmp3 = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %tmp1)
  %tmp5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tmp3)
  ret i16 %tmp5
}

define i32 @saddlv8h_from_v8i16(ptr %A) nounwind {
; CHECK-LABEL: saddlv8h_from_v8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    saddlv s0, v0.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <8 x i16>, ptr %A
  %tmp3 = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %tmp1)
  %tmp5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp3)
  ret i32 %tmp5
}

define i64 @saddlv4s_from_v4i32(ptr %A) nounwind {
; CHECK-LABEL: saddlv4s_from_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    saddlv d0, v0.4s
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %tmp1 = load <4 x i32>, ptr %A
  %tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1)
  %tmp5 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %tmp3)
  ret i64 %tmp5
}

define i32 @saddlv4h_from_v4i16(ptr %A) nounwind {
; CHECK-LABEL: saddlv4h_from_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d0, [x0]
; CHECK-NEXT:    saddlv s0, v0.4h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <4 x i16>, ptr %A
  %tmp3 = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %tmp1)
  %tmp5 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %tmp3)
  ret i32 %tmp5
}

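; uaddlv over 8 or 16 i8 lanes produces at most a 16-bit value, so the
; 'and' with 0xffff is redundant; SDAG removes it via known bits, while
; GlobalISel currently keeps the mask.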
declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8>) nounwind readnone

define i32 @uaddlv_known_bits_v8i8(<8 x i8> %a) {
; CHECK-SD-LABEL: uaddlv_known_bits_v8i8:
; CHECK-SD:       // %bb.0:
; CHECK-SD-NEXT:    uaddlv h0, v0.8b
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: uaddlv_known_bits_v8i8:
; CHECK-GI:       // %bb.0:
; CHECK-GI-NEXT:    uaddlv h0, v0.8b
; CHECK-GI-NEXT:    fmov w8, s0
; CHECK-GI-NEXT:    and w0, w8, #0xffff
; CHECK-GI-NEXT:    ret
  %tmp1 = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
  %tmp2 = and i32 %tmp1, 65535
  ret i32 %tmp2
}

declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>) nounwind readnone

define i32 @uaddlv_known_bits_v16i8(<16 x i8> %a) {
; CHECK-SD-LABEL: uaddlv_known_bits_v16i8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    uaddlv h0, v0.16b
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: uaddlv_known_bits_v16i8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    uaddlv h0, v0.16b
; CHECK-GI-NEXT:    fmov w8, s0
; CHECK-GI-NEXT:    and w0, w8, #0xffff
; CHECK-GI-NEXT:    ret
entry:
  %vaddlv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a)
  %0 = and i32 %vaddlv.i, 65535
  ret i32 %0
}

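; The truncated uaddlv result is broadcast to all lanes; SDAG dups straight
; from the vector lane, whereas GlobalISel round-trips through a GPR.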
define dso_local <8 x i8> @uaddlv_v8i8_dup(<8 x i8> %a) {
; CHECK-SD-LABEL: uaddlv_v8i8_dup:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    uaddlv h0, v0.8b
; CHECK-SD-NEXT:    dup v0.8h, v0.h[0]
; CHECK-SD-NEXT:    rshrn v0.8b, v0.8h, #3
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: uaddlv_v8i8_dup:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    uaddlv h0, v0.8b
; CHECK-GI-NEXT:    fmov w8, s0
; CHECK-GI-NEXT:    dup v0.8h, w8
; CHECK-GI-NEXT:    rshrn v0.8b, v0.8h, #3
; CHECK-GI-NEXT:    ret
entry:
  %vaddlv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
  %0 = trunc i32 %vaddlv.i to i16
  %vecinit.i = insertelement <8 x i16> undef, i16 %0, i64 0
  %vecinit7.i = shufflevector <8 x i16> %vecinit.i, <8 x i16> poison, <8 x i32> zeroinitializer
  %vrshrn_n2 = tail call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %vecinit7.i, i32 3)
  ret <8 x i8> %vrshrn_n2
}

declare <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16>, i32)
declare i64 @llvm.aarch64.neon.urshl.i64(i64, i64)

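; GlobalISel falls back to SDAG for this function (see the warning check
; above), so both RUN lines share the common CHECK output.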
define <8 x i8> @uaddlv_v8i8_urshr(<8 x i8> %a) {
; CHECK-LABEL: uaddlv_v8i8_urshr:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uaddlv h0, v0.8b
; CHECK-NEXT:    urshr d0, d0, #3
; CHECK-NEXT:    dup v0.8b, v0.b[0]
; CHECK-NEXT:    ret
entry:
  %vaddlv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
  %0 = and i32 %vaddlv.i, 65535
  %conv = zext i32 %0 to i64
  %vrshr_n = tail call i64 @llvm.aarch64.neon.urshl.i64(i64 %conv, i64 -3)
  %conv1 = trunc i64 %vrshr_n to i8
  %vecinit.i = insertelement <8 x i8> undef, i8 %conv1, i64 0
  %vecinit7.i = shufflevector <8 x i8> %vecinit.i, <8 x i8> poison, <8 x i32> zeroinitializer
  ret <8 x i8> %vecinit7.i
}

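; The uaddlv result feeding the broadcast should stay in a SIMD register,
; letting the dup read lane 0 directly before the vector shift.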
define <4 x i32> @uaddlv_dup_v4i16(<4 x i16> %a) {
; CHECK-LABEL: uaddlv_dup_v4i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uaddlv s0, v0.4h
; CHECK-NEXT:    dup v0.4s, v0.s[0]
; CHECK-NEXT:    ushr v0.4s, v0.4s, #3
; CHECK-NEXT:    ret
entry:
  %vaddlv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a)
  %vecinit.i = insertelement <4 x i32> undef, i32 %vaddlv.i, i64 0
  %vecinit7.i = shufflevector <4 x i32> %vecinit.i, <4 x i32> poison, <4 x i32> zeroinitializer
  %vshr_n = lshr <4 x i32> %vecinit7.i, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %vshr_n
}

define <4 x i32> @uaddlv_dup_v8i16(<8 x i16> %a) {
; CHECK-LABEL: uaddlv_dup_v8i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uaddlv s0, v0.8h
; CHECK-NEXT:    dup v0.4s, v0.s[0]
; CHECK-NEXT:    ushr v0.4s, v0.4s, #3
; CHECK-NEXT:    ret
entry:
  %vaddlv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a)
  %vecinit.i = insertelement <4 x i32> undef, i32 %vaddlv.i, i64 0
  %vecinit7.i = shufflevector <4 x i32> %vecinit.i, <4 x i32> poison, <4 x i32> zeroinitializer
  %vshr_n = lshr <4 x i32> %vecinit7.i, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %vshr_n
}

declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16>)