xref: /llvm-project/clang/test/CodeGen/AArch64/neon-across.c (revision 207e5ccceec8d3cc3f32723e78f2a142bc61b07d)
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature
2 // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
3 // RUN:  -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
4 
5 // REQUIRES: aarch64-registered-target || arm-registered-target
6 
7 #include <arm_neon.h>
8 
// NOTE(review): Section — vaddlv/vaddlvq, across-vector widening ("long") add
// reductions. The CHECK blocks below are autogenerated by
// update_cc_test_checks.py; do not hand-edit them. They verify lowering to the
// llvm.aarch64.neon.saddlv/uaddlv intrinsics; the i8-element variants call the
// i32-returning intrinsic and truncate the result to i16.
9 // CHECK-LABEL: define {{[^@]+}}@test_vaddlv_s8
10 // CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
11 // CHECK-NEXT:  entry:
12 // CHECK-NEXT:    [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8> [[A]])
13 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
14 // CHECK-NEXT:    ret i16 [[TMP0]]
15 //
16 int16_t test_vaddlv_s8(int8x8_t a) {
17   return vaddlv_s8(a);
18 }
19 
20 // CHECK-LABEL: define {{[^@]+}}@test_vaddlv_s16
21 // CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
22 // CHECK-NEXT:  entry:
23 // CHECK-NEXT:    [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> [[A]])
24 // CHECK-NEXT:    ret i32 [[VADDLV_I]]
25 //
26 int32_t test_vaddlv_s16(int16x4_t a) {
27   return vaddlv_s16(a);
28 }
29 
30 // CHECK-LABEL: define {{[^@]+}}@test_vaddlv_u8
31 // CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
32 // CHECK-NEXT:  entry:
33 // CHECK-NEXT:    [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> [[A]])
34 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
35 // CHECK-NEXT:    ret i16 [[TMP0]]
36 //
37 uint16_t test_vaddlv_u8(uint8x8_t a) {
38   return vaddlv_u8(a);
39 }
40 
41 // CHECK-LABEL: define {{[^@]+}}@test_vaddlv_u16
42 // CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
43 // CHECK-NEXT:  entry:
44 // CHECK-NEXT:    [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> [[A]])
45 // CHECK-NEXT:    ret i32 [[VADDLV_I]]
46 //
47 uint32_t test_vaddlv_u16(uint16x4_t a) {
48   return vaddlv_u16(a);
49 }
50 
51 // CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_s8
52 // CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1:[0-9]+]] {
53 // CHECK-NEXT:  entry:
54 // CHECK-NEXT:    [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> [[A]])
55 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
56 // CHECK-NEXT:    ret i16 [[TMP0]]
57 //
58 int16_t test_vaddlvq_s8(int8x16_t a) {
59   return vaddlvq_s8(a);
60 }
61 
62 // CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_s16
63 // CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
64 // CHECK-NEXT:  entry:
65 // CHECK-NEXT:    [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> [[A]])
66 // CHECK-NEXT:    ret i32 [[VADDLV_I]]
67 //
68 int32_t test_vaddlvq_s16(int16x8_t a) {
69   return vaddlvq_s16(a);
70 }
71 
72 // CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_s32
73 // CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
74 // CHECK-NEXT:  entry:
75 // CHECK-NEXT:    [[VADDLVQ_S32_I:%.*]] = call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> [[A]])
76 // CHECK-NEXT:    ret i64 [[VADDLVQ_S32_I]]
77 //
78 int64_t test_vaddlvq_s32(int32x4_t a) {
79   return vaddlvq_s32(a);
80 }
81 
82 // CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_u8
83 // CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
84 // CHECK-NEXT:  entry:
85 // CHECK-NEXT:    [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> [[A]])
86 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
87 // CHECK-NEXT:    ret i16 [[TMP0]]
88 //
89 uint16_t test_vaddlvq_u8(uint8x16_t a) {
90   return vaddlvq_u8(a);
91 }
92 
93 // CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_u16
94 // CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
95 // CHECK-NEXT:  entry:
96 // CHECK-NEXT:    [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> [[A]])
97 // CHECK-NEXT:    ret i32 [[VADDLV_I]]
98 //
99 uint32_t test_vaddlvq_u16(uint16x8_t a) {
100   return vaddlvq_u16(a);
101 }
102 
103 // CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_u32
104 // CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
105 // CHECK-NEXT:  entry:
106 // CHECK-NEXT:    [[VADDLVQ_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> [[A]])
107 // CHECK-NEXT:    ret i64 [[VADDLVQ_U32_I]]
108 //
109 uint64_t test_vaddlvq_u32(uint32x4_t a) {
110   return vaddlvq_u32(a);
111 }
112 
// NOTE(review): Section — vmaxv/vmaxvq, across-vector integer maximum
// reductions. Autogenerated CHECK blocks (update_cc_test_checks.py); do not
// hand-edit. They verify lowering to llvm.aarch64.neon.smaxv/umaxv; the
// narrow-element variants call the i32-returning intrinsic and truncate back
// to the element width.
113 // CHECK-LABEL: define {{[^@]+}}@test_vmaxv_s8
114 // CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
115 // CHECK-NEXT:  entry:
116 // CHECK-NEXT:    [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> [[A]])
117 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
118 // CHECK-NEXT:    ret i8 [[TMP0]]
119 //
120 int8_t test_vmaxv_s8(int8x8_t a) {
121   return vmaxv_s8(a);
122 }
123 
124 // CHECK-LABEL: define {{[^@]+}}@test_vmaxv_s16
125 // CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
126 // CHECK-NEXT:  entry:
127 // CHECK-NEXT:    [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> [[A]])
128 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
129 // CHECK-NEXT:    ret i16 [[TMP0]]
130 //
131 int16_t test_vmaxv_s16(int16x4_t a) {
132   return vmaxv_s16(a);
133 }
134 
135 // CHECK-LABEL: define {{[^@]+}}@test_vmaxv_u8
136 // CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
137 // CHECK-NEXT:  entry:
138 // CHECK-NEXT:    [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> [[A]])
139 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
140 // CHECK-NEXT:    ret i8 [[TMP0]]
141 //
142 uint8_t test_vmaxv_u8(uint8x8_t a) {
143   return vmaxv_u8(a);
144 }
145 
146 // CHECK-LABEL: define {{[^@]+}}@test_vmaxv_u16
147 // CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
148 // CHECK-NEXT:  entry:
149 // CHECK-NEXT:    [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> [[A]])
150 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
151 // CHECK-NEXT:    ret i16 [[TMP0]]
152 //
153 uint16_t test_vmaxv_u16(uint16x4_t a) {
154   return vmaxv_u16(a);
155 }
156 
157 // CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s8
158 // CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
159 // CHECK-NEXT:  entry:
160 // CHECK-NEXT:    [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> [[A]])
161 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
162 // CHECK-NEXT:    ret i8 [[TMP0]]
163 //
164 int8_t test_vmaxvq_s8(int8x16_t a) {
165   return vmaxvq_s8(a);
166 }
167 
168 // CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s16
169 // CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
170 // CHECK-NEXT:  entry:
171 // CHECK-NEXT:    [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> [[A]])
172 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
173 // CHECK-NEXT:    ret i16 [[TMP0]]
174 //
175 int16_t test_vmaxvq_s16(int16x8_t a) {
176   return vmaxvq_s16(a);
177 }
178 
179 // CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s32
180 // CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
181 // CHECK-NEXT:  entry:
182 // CHECK-NEXT:    [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> [[A]])
183 // CHECK-NEXT:    ret i32 [[VMAXVQ_S32_I]]
184 //
185 int32_t test_vmaxvq_s32(int32x4_t a) {
186   return vmaxvq_s32(a);
187 }
188 
189 // CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u8
190 // CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
191 // CHECK-NEXT:  entry:
192 // CHECK-NEXT:    [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> [[A]])
193 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
194 // CHECK-NEXT:    ret i8 [[TMP0]]
195 //
196 uint8_t test_vmaxvq_u8(uint8x16_t a) {
197   return vmaxvq_u8(a);
198 }
199 
200 // CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u16
201 // CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
202 // CHECK-NEXT:  entry:
203 // CHECK-NEXT:    [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> [[A]])
204 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
205 // CHECK-NEXT:    ret i16 [[TMP0]]
206 //
207 uint16_t test_vmaxvq_u16(uint16x8_t a) {
208   return vmaxvq_u16(a);
209 }
210 
211 // CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u32
212 // CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
213 // CHECK-NEXT:  entry:
214 // CHECK-NEXT:    [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> [[A]])
215 // CHECK-NEXT:    ret i32 [[VMAXVQ_U32_I]]
216 //
217 uint32_t test_vmaxvq_u32(uint32x4_t a) {
218   return vmaxvq_u32(a);
219 }
220 
// NOTE(review): Section — vminv/vminvq, across-vector integer minimum
// reductions. Autogenerated CHECK blocks (update_cc_test_checks.py); do not
// hand-edit. They verify lowering to llvm.aarch64.neon.sminv/uminv; the
// narrow-element variants call the i32-returning intrinsic and truncate back
// to the element width.
221 // CHECK-LABEL: define {{[^@]+}}@test_vminv_s8
222 // CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
223 // CHECK-NEXT:  entry:
224 // CHECK-NEXT:    [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> [[A]])
225 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
226 // CHECK-NEXT:    ret i8 [[TMP0]]
227 //
228 int8_t test_vminv_s8(int8x8_t a) {
229   return vminv_s8(a);
230 }
231 
232 // CHECK-LABEL: define {{[^@]+}}@test_vminv_s16
233 // CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
234 // CHECK-NEXT:  entry:
235 // CHECK-NEXT:    [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> [[A]])
236 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
237 // CHECK-NEXT:    ret i16 [[TMP0]]
238 //
239 int16_t test_vminv_s16(int16x4_t a) {
240   return vminv_s16(a);
241 }
242 
243 // CHECK-LABEL: define {{[^@]+}}@test_vminv_u8
244 // CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
245 // CHECK-NEXT:  entry:
246 // CHECK-NEXT:    [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> [[A]])
247 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
248 // CHECK-NEXT:    ret i8 [[TMP0]]
249 //
250 uint8_t test_vminv_u8(uint8x8_t a) {
251   return vminv_u8(a);
252 }
253 
254 // CHECK-LABEL: define {{[^@]+}}@test_vminv_u16
255 // CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
256 // CHECK-NEXT:  entry:
257 // CHECK-NEXT:    [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> [[A]])
258 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
259 // CHECK-NEXT:    ret i16 [[TMP0]]
260 //
261 uint16_t test_vminv_u16(uint16x4_t a) {
262   return vminv_u16(a);
263 }
264 
265 // CHECK-LABEL: define {{[^@]+}}@test_vminvq_s8
266 // CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
267 // CHECK-NEXT:  entry:
268 // CHECK-NEXT:    [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> [[A]])
269 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
270 // CHECK-NEXT:    ret i8 [[TMP0]]
271 //
272 int8_t test_vminvq_s8(int8x16_t a) {
273   return vminvq_s8(a);
274 }
275 
276 // CHECK-LABEL: define {{[^@]+}}@test_vminvq_s16
277 // CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
278 // CHECK-NEXT:  entry:
279 // CHECK-NEXT:    [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> [[A]])
280 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
281 // CHECK-NEXT:    ret i16 [[TMP0]]
282 //
283 int16_t test_vminvq_s16(int16x8_t a) {
284   return vminvq_s16(a);
285 }
286 
287 // CHECK-LABEL: define {{[^@]+}}@test_vminvq_s32
288 // CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
289 // CHECK-NEXT:  entry:
290 // CHECK-NEXT:    [[VMINVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> [[A]])
291 // CHECK-NEXT:    ret i32 [[VMINVQ_S32_I]]
292 //
293 int32_t test_vminvq_s32(int32x4_t a) {
294   return vminvq_s32(a);
295 }
296 
297 // CHECK-LABEL: define {{[^@]+}}@test_vminvq_u8
298 // CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
299 // CHECK-NEXT:  entry:
300 // CHECK-NEXT:    [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> [[A]])
301 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
302 // CHECK-NEXT:    ret i8 [[TMP0]]
303 //
304 uint8_t test_vminvq_u8(uint8x16_t a) {
305   return vminvq_u8(a);
306 }
307 
308 // CHECK-LABEL: define {{[^@]+}}@test_vminvq_u16
309 // CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
310 // CHECK-NEXT:  entry:
311 // CHECK-NEXT:    [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> [[A]])
312 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
313 // CHECK-NEXT:    ret i16 [[TMP0]]
314 //
315 uint16_t test_vminvq_u16(uint16x8_t a) {
316   return vminvq_u16(a);
317 }
318 
319 // CHECK-LABEL: define {{[^@]+}}@test_vminvq_u32
320 // CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
321 // CHECK-NEXT:  entry:
322 // CHECK-NEXT:    [[VMINVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> [[A]])
323 // CHECK-NEXT:    ret i32 [[VMINVQ_U32_I]]
324 //
325 uint32_t test_vminvq_u32(uint32x4_t a) {
326   return vminvq_u32(a);
327 }
328 
// NOTE(review): Section — vaddv/vaddvq, across-vector (non-widening) add
// reductions. Autogenerated CHECK blocks (update_cc_test_checks.py); do not
// hand-edit. They verify lowering to llvm.aarch64.neon.saddv/uaddv; unlike
// the vaddlv tests above, the result is truncated back to the element width
// rather than widened.
329 // CHECK-LABEL: define {{[^@]+}}@test_vaddv_s8
330 // CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
331 // CHECK-NEXT:  entry:
332 // CHECK-NEXT:    [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> [[A]])
333 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
334 // CHECK-NEXT:    ret i8 [[TMP0]]
335 //
336 int8_t test_vaddv_s8(int8x8_t a) {
337   return vaddv_s8(a);
338 }
339 
340 // CHECK-LABEL: define {{[^@]+}}@test_vaddv_s16
341 // CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
342 // CHECK-NEXT:  entry:
343 // CHECK-NEXT:    [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> [[A]])
344 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
345 // CHECK-NEXT:    ret i16 [[TMP0]]
346 //
347 int16_t test_vaddv_s16(int16x4_t a) {
348   return vaddv_s16(a);
349 }
350 
351 // CHECK-LABEL: define {{[^@]+}}@test_vaddv_u8
352 // CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
353 // CHECK-NEXT:  entry:
354 // CHECK-NEXT:    [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> [[A]])
355 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
356 // CHECK-NEXT:    ret i8 [[TMP0]]
357 //
358 uint8_t test_vaddv_u8(uint8x8_t a) {
359   return vaddv_u8(a);
360 }
361 
362 // CHECK-LABEL: define {{[^@]+}}@test_vaddv_u16
363 // CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
364 // CHECK-NEXT:  entry:
365 // CHECK-NEXT:    [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> [[A]])
366 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
367 // CHECK-NEXT:    ret i16 [[TMP0]]
368 //
369 uint16_t test_vaddv_u16(uint16x4_t a) {
370   return vaddv_u16(a);
371 }
372 
373 // CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s8
374 // CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
375 // CHECK-NEXT:  entry:
376 // CHECK-NEXT:    [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> [[A]])
377 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
378 // CHECK-NEXT:    ret i8 [[TMP0]]
379 //
380 int8_t test_vaddvq_s8(int8x16_t a) {
381   return vaddvq_s8(a);
382 }
383 
384 // CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s16
385 // CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
386 // CHECK-NEXT:  entry:
387 // CHECK-NEXT:    [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> [[A]])
388 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
389 // CHECK-NEXT:    ret i16 [[TMP0]]
390 //
391 int16_t test_vaddvq_s16(int16x8_t a) {
392   return vaddvq_s16(a);
393 }
394 
395 // CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s32
396 // CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
397 // CHECK-NEXT:  entry:
398 // CHECK-NEXT:    [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> [[A]])
399 // CHECK-NEXT:    ret i32 [[VADDVQ_S32_I]]
400 //
401 int32_t test_vaddvq_s32(int32x4_t a) {
402   return vaddvq_s32(a);
403 }
404 
405 // CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u8
406 // CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
407 // CHECK-NEXT:  entry:
408 // CHECK-NEXT:    [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> [[A]])
409 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
410 // CHECK-NEXT:    ret i8 [[TMP0]]
411 //
412 uint8_t test_vaddvq_u8(uint8x16_t a) {
413   return vaddvq_u8(a);
414 }
415 
416 // CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u16
417 // CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
418 // CHECK-NEXT:  entry:
419 // CHECK-NEXT:    [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> [[A]])
420 // CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
421 // CHECK-NEXT:    ret i16 [[TMP0]]
422 //
423 uint16_t test_vaddvq_u16(uint16x8_t a) {
424   return vaddvq_u16(a);
425 }
426 
427 // CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u32
428 // CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
429 // CHECK-NEXT:  entry:
430 // CHECK-NEXT:    [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> [[A]])
431 // CHECK-NEXT:    ret i32 [[VADDVQ_U32_I]]
432 //
433 uint32_t test_vaddvq_u32(uint32x4_t a) {
434   return vaddvq_u32(a);
435 }
436 
// NOTE(review): Section — float32 across-vector reductions: vmaxvq_f32,
// vminvq_f32, vmaxnmvq_f32, vminnmvq_f32. Autogenerated CHECK blocks
// (update_cc_test_checks.py); do not hand-edit. They verify lowering to
// llvm.aarch64.neon.fmaxv/fminv/fmaxnmv/fminnmv on <4 x float>.
437 // CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_f32
438 // CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR1]] {
439 // CHECK-NEXT:  entry:
440 // CHECK-NEXT:    [[VMAXVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> [[A]])
441 // CHECK-NEXT:    ret float [[VMAXVQ_F32_I]]
442 //
443 float32_t test_vmaxvq_f32(float32x4_t a) {
444   return vmaxvq_f32(a);
445 }
446 
447 // CHECK-LABEL: define {{[^@]+}}@test_vminvq_f32
448 // CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR1]] {
449 // CHECK-NEXT:  entry:
450 // CHECK-NEXT:    [[VMINVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> [[A]])
451 // CHECK-NEXT:    ret float [[VMINVQ_F32_I]]
452 //
453 float32_t test_vminvq_f32(float32x4_t a) {
454   return vminvq_f32(a);
455 }
456 
457 // CHECK-LABEL: define {{[^@]+}}@test_vmaxnmvq_f32
458 // CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR1]] {
459 // CHECK-NEXT:  entry:
460 // CHECK-NEXT:    [[VMAXNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> [[A]])
461 // CHECK-NEXT:    ret float [[VMAXNMVQ_F32_I]]
462 //
463 float32_t test_vmaxnmvq_f32(float32x4_t a) {
464   return vmaxnmvq_f32(a);
465 }
466 
467 // CHECK-LABEL: define {{[^@]+}}@test_vminnmvq_f32
468 // CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR1]] {
469 // CHECK-NEXT:  entry:
470 // CHECK-NEXT:    [[VMINNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> [[A]])
471 // CHECK-NEXT:    ret float [[VMINNMVQ_F32_I]]
472 //
473 float32_t test_vminnmvq_f32(float32x4_t a) {
474   return vminnmvq_f32(a);
475 }
476 
477