xref: /llvm-project/clang/test/CodeGen/arm-mve-intrinsics/absneg.c (revision 38fffa630ee80163dc65e759392ad29798905679)
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
3 // RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
4 
5 // REQUIRES: aarch64-registered-target || arm-registered-target
6 
7 #include <arm_mve.h>
8 
// vabsq on float vectors: the expected IR is a plain call to llvm.fabs.
// Assertions below are autogenerated by utils/update_cc_test_checks.py;
// regenerate rather than hand-editing them.
9 // CHECK-LABEL: @test_vabsq_f16(
10 // CHECK-NEXT:  entry:
11 // CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.fabs.v8f16(<8 x half> [[A:%.*]])
12 // CHECK-NEXT:    ret <8 x half> [[TMP0]]
13 //
14 float16x8_t test_vabsq_f16(float16x8_t a)
15 {
16 #ifdef POLYMORPHIC
17     return vabsq(a);
18 #else /* POLYMORPHIC */
19     return vabsq_f16(a);
20 #endif /* POLYMORPHIC */
21 }
22 
23 // CHECK-LABEL: @test_vabsq_f32(
24 // CHECK-NEXT:  entry:
25 // CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> [[A:%.*]])
26 // CHECK-NEXT:    ret <4 x float> [[TMP0]]
27 //
28 float32x4_t test_vabsq_f32(float32x4_t a)
29 {
30 #ifdef POLYMORPHIC
31     return vabsq(a);
32 #else /* POLYMORPHIC */
33     return vabsq_f32(a);
34 #endif /* POLYMORPHIC */
35 }
36 
// vabsq on signed integer vectors: expected IR is the open-coded abs
// pattern (icmp slt 0 / sub from zero / select). Both the polymorphic and
// the type-suffixed spellings must produce identical IR.
37 // CHECK-LABEL: @test_vabsq_s8(
38 // CHECK-NEXT:  entry:
39 // CHECK-NEXT:    [[TMP0:%.*]] = icmp slt <16 x i8> [[A:%.*]], zeroinitializer
40 // CHECK-NEXT:    [[TMP1:%.*]] = sub <16 x i8> zeroinitializer, [[A]]
41 // CHECK-NEXT:    [[TMP2:%.*]] = select <16 x i1> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[A]]
42 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
43 //
44 int8x16_t test_vabsq_s8(int8x16_t a)
45 {
46 #ifdef POLYMORPHIC
47     return vabsq(a);
48 #else /* POLYMORPHIC */
49     return vabsq_s8(a);
50 #endif /* POLYMORPHIC */
51 }
52 
53 // CHECK-LABEL: @test_vabsq_s16(
54 // CHECK-NEXT:  entry:
55 // CHECK-NEXT:    [[TMP0:%.*]] = icmp slt <8 x i16> [[A:%.*]], zeroinitializer
56 // CHECK-NEXT:    [[TMP1:%.*]] = sub <8 x i16> zeroinitializer, [[A]]
57 // CHECK-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[A]]
58 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
59 //
60 int16x8_t test_vabsq_s16(int16x8_t a)
61 {
62 #ifdef POLYMORPHIC
63     return vabsq(a);
64 #else /* POLYMORPHIC */
65     return vabsq_s16(a);
66 #endif /* POLYMORPHIC */
67 }
68 
69 // CHECK-LABEL: @test_vabsq_s32(
70 // CHECK-NEXT:  entry:
71 // CHECK-NEXT:    [[TMP0:%.*]] = icmp slt <4 x i32> [[A:%.*]], zeroinitializer
72 // CHECK-NEXT:    [[TMP1:%.*]] = sub <4 x i32> zeroinitializer, [[A]]
73 // CHECK-NEXT:    [[TMP2:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[A]]
74 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
75 //
76 int32x4_t test_vabsq_s32(int32x4_t a)
77 {
78 #ifdef POLYMORPHIC
79     return vabsq(a);
80 #else /* POLYMORPHIC */
81     return vabsq_s32(a);
82 #endif /* POLYMORPHIC */
83 }
84 
// vmvnq (bitwise NOT): expected IR is an xor with an all-ones splat, for
// both signed and unsigned element types of every width.
85 // CHECK-LABEL: @test_vmvnq_s8(
86 // CHECK-NEXT:  entry:
87 // CHECK-NEXT:    [[TMP0:%.*]] = xor <16 x i8> [[A:%.*]], splat (i8 -1)
88 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
89 //
90 int8x16_t test_vmvnq_s8(int8x16_t a)
91 {
92 #ifdef POLYMORPHIC
93     return vmvnq(a);
94 #else /* POLYMORPHIC */
95     return vmvnq_s8(a);
96 #endif /* POLYMORPHIC */
97 }
98 
99 // CHECK-LABEL: @test_vmvnq_s16(
100 // CHECK-NEXT:  entry:
101 // CHECK-NEXT:    [[TMP0:%.*]] = xor <8 x i16> [[A:%.*]], splat (i16 -1)
102 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
103 //
104 int16x8_t test_vmvnq_s16(int16x8_t a)
105 {
106 #ifdef POLYMORPHIC
107     return vmvnq(a);
108 #else /* POLYMORPHIC */
109     return vmvnq_s16(a);
110 #endif /* POLYMORPHIC */
111 }
112 
113 // CHECK-LABEL: @test_vmvnq_s32(
114 // CHECK-NEXT:  entry:
115 // CHECK-NEXT:    [[TMP0:%.*]] = xor <4 x i32> [[A:%.*]], splat (i32 -1)
116 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
117 //
118 int32x4_t test_vmvnq_s32(int32x4_t a)
119 {
120 #ifdef POLYMORPHIC
121     return vmvnq(a);
122 #else /* POLYMORPHIC */
123     return vmvnq_s32(a);
124 #endif /* POLYMORPHIC */
125 }
126 
127 // CHECK-LABEL: @test_vmvnq_u8(
128 // CHECK-NEXT:  entry:
129 // CHECK-NEXT:    [[TMP0:%.*]] = xor <16 x i8> [[A:%.*]], splat (i8 -1)
130 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
131 //
132 uint8x16_t test_vmvnq_u8(uint8x16_t a)
133 {
134 #ifdef POLYMORPHIC
135     return vmvnq(a);
136 #else /* POLYMORPHIC */
137     return vmvnq_u8(a);
138 #endif /* POLYMORPHIC */
139 }
140 
141 // CHECK-LABEL: @test_vmvnq_u16(
142 // CHECK-NEXT:  entry:
143 // CHECK-NEXT:    [[TMP0:%.*]] = xor <8 x i16> [[A:%.*]], splat (i16 -1)
144 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
145 //
146 uint16x8_t test_vmvnq_u16(uint16x8_t a)
147 {
148 #ifdef POLYMORPHIC
149     return vmvnq(a);
150 #else /* POLYMORPHIC */
151     return vmvnq_u16(a);
152 #endif /* POLYMORPHIC */
153 }
154 
155 // CHECK-LABEL: @test_vmvnq_u32(
156 // CHECK-NEXT:  entry:
157 // CHECK-NEXT:    [[TMP0:%.*]] = xor <4 x i32> [[A:%.*]], splat (i32 -1)
158 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
159 //
160 uint32x4_t test_vmvnq_u32(uint32x4_t a)
161 {
162 #ifdef POLYMORPHIC
163     return vmvnq(a);
164 #else /* POLYMORPHIC */
165     return vmvnq_u32(a);
166 #endif /* POLYMORPHIC */
167 }
168 
// vmvnq_m (merging-predicated NOT): the i16 predicate is widened and
// converted to a vector predicate via arm.mve.pred.i2v, then the
// arm.mve.mvn.predicated intrinsic is called with 'inactive' supplying the
// lanes where the predicate is false.
169 // CHECK-LABEL: @test_vmvnq_m_s8(
170 // CHECK-NEXT:  entry:
171 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
172 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
173 // CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
174 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
175 //
176 int8x16_t test_vmvnq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
177 {
178 #ifdef POLYMORPHIC
179     return vmvnq_m(inactive, a, p);
180 #else /* POLYMORPHIC */
181     return vmvnq_m_s8(inactive, a, p);
182 #endif /* POLYMORPHIC */
183 }
184 
185 // CHECK-LABEL: @test_vmvnq_m_s16(
186 // CHECK-NEXT:  entry:
187 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
188 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
189 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
190 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
191 //
192 int16x8_t test_vmvnq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
193 {
194 #ifdef POLYMORPHIC
195     return vmvnq_m(inactive, a, p);
196 #else /* POLYMORPHIC */
197     return vmvnq_m_s16(inactive, a, p);
198 #endif /* POLYMORPHIC */
199 }
200 
201 // CHECK-LABEL: @test_vmvnq_m_s32(
202 // CHECK-NEXT:  entry:
203 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
204 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
205 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
206 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
207 //
208 int32x4_t test_vmvnq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
209 {
210 #ifdef POLYMORPHIC
211     return vmvnq_m(inactive, a, p);
212 #else /* POLYMORPHIC */
213     return vmvnq_m_s32(inactive, a, p);
214 #endif /* POLYMORPHIC */
215 }
216 
217 // CHECK-LABEL: @test_vmvnq_m_u8(
218 // CHECK-NEXT:  entry:
219 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
220 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
221 // CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
222 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
223 //
224 uint8x16_t test_vmvnq_m_u8(uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
225 {
226 #ifdef POLYMORPHIC
227     return vmvnq_m(inactive, a, p);
228 #else /* POLYMORPHIC */
229     return vmvnq_m_u8(inactive, a, p);
230 #endif /* POLYMORPHIC */
231 }
232 
233 // CHECK-LABEL: @test_vmvnq_m_u16(
234 // CHECK-NEXT:  entry:
235 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
236 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
237 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
238 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
239 //
240 uint16x8_t test_vmvnq_m_u16(uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
241 {
242 #ifdef POLYMORPHIC
243     return vmvnq_m(inactive, a, p);
244 #else /* POLYMORPHIC */
245     return vmvnq_m_u16(inactive, a, p);
246 #endif /* POLYMORPHIC */
247 }
248 
249 // CHECK-LABEL: @test_vmvnq_m_u32(
250 // CHECK-NEXT:  entry:
251 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
252 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
253 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
254 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
255 //
256 uint32x4_t test_vmvnq_m_u32(uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
257 {
258 #ifdef POLYMORPHIC
259     return vmvnq_m(inactive, a, p);
260 #else /* POLYMORPHIC */
261     return vmvnq_m_u32(inactive, a, p);
262 #endif /* POLYMORPHIC */
263 }
264 
// vmvnq_x ("don't-care" predicated NOT): same lowering as vmvnq_m, but the
// inactive-lanes operand of arm.mve.mvn.predicated is undef because the
// _x variants leave false-predicated lanes unspecified.
265 // CHECK-LABEL: @test_vmvnq_x_s8(
266 // CHECK-NEXT:  entry:
267 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
268 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
269 // CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
270 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
271 //
272 int8x16_t test_vmvnq_x_s8(int8x16_t a, mve_pred16_t p)
273 {
274 #ifdef POLYMORPHIC
275     return vmvnq_x(a, p);
276 #else /* POLYMORPHIC */
277     return vmvnq_x_s8(a, p);
278 #endif /* POLYMORPHIC */
279 }
280 
281 // CHECK-LABEL: @test_vmvnq_x_s16(
282 // CHECK-NEXT:  entry:
283 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
284 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
285 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
286 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
287 //
288 int16x8_t test_vmvnq_x_s16(int16x8_t a, mve_pred16_t p)
289 {
290 #ifdef POLYMORPHIC
291     return vmvnq_x(a, p);
292 #else /* POLYMORPHIC */
293     return vmvnq_x_s16(a, p);
294 #endif /* POLYMORPHIC */
295 }
296 
297 // CHECK-LABEL: @test_vmvnq_x_s32(
298 // CHECK-NEXT:  entry:
299 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
300 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
301 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
302 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
303 //
304 int32x4_t test_vmvnq_x_s32(int32x4_t a, mve_pred16_t p)
305 {
306 #ifdef POLYMORPHIC
307     return vmvnq_x(a, p);
308 #else /* POLYMORPHIC */
309     return vmvnq_x_s32(a, p);
310 #endif /* POLYMORPHIC */
311 }
312 
313 // CHECK-LABEL: @test_vmvnq_x_u8(
314 // CHECK-NEXT:  entry:
315 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
316 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
317 // CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
318 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
319 //
320 uint8x16_t test_vmvnq_x_u8(uint8x16_t a, mve_pred16_t p)
321 {
322 #ifdef POLYMORPHIC
323     return vmvnq_x(a, p);
324 #else /* POLYMORPHIC */
325     return vmvnq_x_u8(a, p);
326 #endif /* POLYMORPHIC */
327 }
328 
329 // CHECK-LABEL: @test_vmvnq_x_u16(
330 // CHECK-NEXT:  entry:
331 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
332 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
333 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
334 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
335 //
336 uint16x8_t test_vmvnq_x_u16(uint16x8_t a, mve_pred16_t p)
337 {
338 #ifdef POLYMORPHIC
339     return vmvnq_x(a, p);
340 #else /* POLYMORPHIC */
341     return vmvnq_x_u16(a, p);
342 #endif /* POLYMORPHIC */
343 }
344 
345 // CHECK-LABEL: @test_vmvnq_x_u32(
346 // CHECK-NEXT:  entry:
347 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
348 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
349 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
350 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
351 //
352 uint32x4_t test_vmvnq_x_u32(uint32x4_t a, mve_pred16_t p)
353 {
354 #ifdef POLYMORPHIC
355     return vmvnq_x(a, p);
356 #else /* POLYMORPHIC */
357     return vmvnq_x_u32(a, p);
358 #endif /* POLYMORPHIC */
359 }
360 
// vnegq (negation): float vectors lower to fneg, signed integer vectors to
// a subtraction from zero.
361 // CHECK-LABEL: @test_vnegq_f16(
362 // CHECK-NEXT:  entry:
363 // CHECK-NEXT:    [[TMP0:%.*]] = fneg <8 x half> [[A:%.*]]
364 // CHECK-NEXT:    ret <8 x half> [[TMP0]]
365 //
366 float16x8_t test_vnegq_f16(float16x8_t a)
367 {
368 #ifdef POLYMORPHIC
369     return vnegq(a);
370 #else /* POLYMORPHIC */
371     return vnegq_f16(a);
372 #endif /* POLYMORPHIC */
373 }
374 
375 // CHECK-LABEL: @test_vnegq_f32(
376 // CHECK-NEXT:  entry:
377 // CHECK-NEXT:    [[TMP0:%.*]] = fneg <4 x float> [[A:%.*]]
378 // CHECK-NEXT:    ret <4 x float> [[TMP0]]
379 //
380 float32x4_t test_vnegq_f32(float32x4_t a)
381 {
382 #ifdef POLYMORPHIC
383     return vnegq(a);
384 #else /* POLYMORPHIC */
385     return vnegq_f32(a);
386 #endif /* POLYMORPHIC */
387 }
388 
389 // CHECK-LABEL: @test_vnegq_s8(
390 // CHECK-NEXT:  entry:
391 // CHECK-NEXT:    [[TMP0:%.*]] = sub <16 x i8> zeroinitializer, [[A:%.*]]
392 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
393 //
394 int8x16_t test_vnegq_s8(int8x16_t a)
395 {
396 #ifdef POLYMORPHIC
397     return vnegq(a);
398 #else /* POLYMORPHIC */
399     return vnegq_s8(a);
400 #endif /* POLYMORPHIC */
401 }
402 
403 // CHECK-LABEL: @test_vnegq_s16(
404 // CHECK-NEXT:  entry:
405 // CHECK-NEXT:    [[TMP0:%.*]] = sub <8 x i16> zeroinitializer, [[A:%.*]]
406 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
407 //
408 int16x8_t test_vnegq_s16(int16x8_t a)
409 {
410 #ifdef POLYMORPHIC
411     return vnegq(a);
412 #else /* POLYMORPHIC */
413     return vnegq_s16(a);
414 #endif /* POLYMORPHIC */
415 }
416 
417 // CHECK-LABEL: @test_vnegq_s32(
418 // CHECK-NEXT:  entry:
419 // CHECK-NEXT:    [[TMP0:%.*]] = sub <4 x i32> zeroinitializer, [[A:%.*]]
420 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
421 //
422 int32x4_t test_vnegq_s32(int32x4_t a)
423 {
424 #ifdef POLYMORPHIC
425     return vnegq(a);
426 #else /* POLYMORPHIC */
427     return vnegq_s32(a);
428 #endif /* POLYMORPHIC */
429 }
430 
// vqabsq (saturating absolute value): open-coded as compare/negate/select,
// with the extra icmp-eq-INT_MIN / select pair saturating INT_MIN to
// INT_MAX instead of wrapping.
431 // CHECK-LABEL: @test_vqabsq_s8(
432 // CHECK-NEXT:  entry:
433 // CHECK-NEXT:    [[TMP0:%.*]] = icmp sgt <16 x i8> [[A:%.*]], zeroinitializer
434 // CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <16 x i8> [[A]], splat (i8 -128)
435 // CHECK-NEXT:    [[TMP2:%.*]] = sub <16 x i8> zeroinitializer, [[A]]
436 // CHECK-NEXT:    [[TMP3:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> splat (i8 127), <16 x i8> [[TMP2]]
437 // CHECK-NEXT:    [[TMP4:%.*]] = select <16 x i1> [[TMP0]], <16 x i8> [[A]], <16 x i8> [[TMP3]]
438 // CHECK-NEXT:    ret <16 x i8> [[TMP4]]
439 //
440 int8x16_t test_vqabsq_s8(int8x16_t a)
441 {
442 #ifdef POLYMORPHIC
443     return vqabsq(a);
444 #else /* POLYMORPHIC */
445     return vqabsq_s8(a);
446 #endif /* POLYMORPHIC */
447 }
448 
449 // CHECK-LABEL: @test_vqabsq_s16(
450 // CHECK-NEXT:  entry:
451 // CHECK-NEXT:    [[TMP0:%.*]] = icmp sgt <8 x i16> [[A:%.*]], zeroinitializer
452 // CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <8 x i16> [[A]], splat (i16 -32768)
453 // CHECK-NEXT:    [[TMP2:%.*]] = sub <8 x i16> zeroinitializer, [[A]]
454 // CHECK-NEXT:    [[TMP3:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> splat (i16 32767), <8 x i16> [[TMP2]]
455 // CHECK-NEXT:    [[TMP4:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[A]], <8 x i16> [[TMP3]]
456 // CHECK-NEXT:    ret <8 x i16> [[TMP4]]
457 //
458 int16x8_t test_vqabsq_s16(int16x8_t a)
459 {
460 #ifdef POLYMORPHIC
461     return vqabsq(a);
462 #else /* POLYMORPHIC */
463     return vqabsq_s16(a);
464 #endif /* POLYMORPHIC */
465 }
466 
467 // CHECK-LABEL: @test_vqabsq_s32(
468 // CHECK-NEXT:  entry:
469 // CHECK-NEXT:    [[TMP0:%.*]] = icmp sgt <4 x i32> [[A:%.*]], zeroinitializer
470 // CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <4 x i32> [[A]], splat (i32 -2147483648)
471 // CHECK-NEXT:    [[TMP2:%.*]] = sub <4 x i32> zeroinitializer, [[A]]
472 // CHECK-NEXT:    [[TMP3:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> splat (i32 2147483647), <4 x i32> [[TMP2]]
473 // CHECK-NEXT:    [[TMP4:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[A]], <4 x i32> [[TMP3]]
474 // CHECK-NEXT:    ret <4 x i32> [[TMP4]]
475 //
476 int32x4_t test_vqabsq_s32(int32x4_t a)
477 {
478 #ifdef POLYMORPHIC
479     return vqabsq(a);
480 #else /* POLYMORPHIC */
481     return vqabsq_s32(a);
482 #endif /* POLYMORPHIC */
483 }
484 
// vqnegq (saturating negation): sub-from-zero with a select that saturates
// INT_MIN to INT_MAX instead of wrapping.
485 // CHECK-LABEL: @test_vqnegq_s8(
486 // CHECK-NEXT:  entry:
487 // CHECK-NEXT:    [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], splat (i8 -128)
488 // CHECK-NEXT:    [[TMP1:%.*]] = sub <16 x i8> zeroinitializer, [[A]]
489 // CHECK-NEXT:    [[TMP2:%.*]] = select <16 x i1> [[TMP0]], <16 x i8> splat (i8 127), <16 x i8> [[TMP1]]
490 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
491 //
492 int8x16_t test_vqnegq_s8(int8x16_t a)
493 {
494 #ifdef POLYMORPHIC
495     return vqnegq(a);
496 #else /* POLYMORPHIC */
497     return vqnegq_s8(a);
498 #endif /* POLYMORPHIC */
499 }
500 
501 // CHECK-LABEL: @test_vqnegq_s16(
502 // CHECK-NEXT:  entry:
503 // CHECK-NEXT:    [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], splat (i16 -32768)
504 // CHECK-NEXT:    [[TMP1:%.*]] = sub <8 x i16> zeroinitializer, [[A]]
505 // CHECK-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> splat (i16 32767), <8 x i16> [[TMP1]]
506 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
507 //
508 int16x8_t test_vqnegq_s16(int16x8_t a)
509 {
510 #ifdef POLYMORPHIC
511     return vqnegq(a);
512 #else /* POLYMORPHIC */
513     return vqnegq_s16(a);
514 #endif /* POLYMORPHIC */
515 }
516 
517 // CHECK-LABEL: @test_vqnegq_s32(
518 // CHECK-NEXT:  entry:
519 // CHECK-NEXT:    [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], splat (i32 -2147483648)
520 // CHECK-NEXT:    [[TMP1:%.*]] = sub <4 x i32> zeroinitializer, [[A]]
521 // CHECK-NEXT:    [[TMP2:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> splat (i32 2147483647), <4 x i32> [[TMP1]]
522 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
523 //
524 int32x4_t test_vqnegq_s32(int32x4_t a)
525 {
526 #ifdef POLYMORPHIC
527     return vqnegq(a);
528 #else /* POLYMORPHIC */
529     return vqnegq_s32(a);
530 #endif /* POLYMORPHIC */
531 }
532 
// vnegq_m (merging-predicated negation): predicate conversion via
// arm.mve.pred.i2v followed by arm.mve.neg.predicated, with 'inactive'
// supplying false-predicated lanes.
533 // CHECK-LABEL: @test_vnegq_m_f16(
534 // CHECK-NEXT:  entry:
535 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
536 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
537 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.neg.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
538 // CHECK-NEXT:    ret <8 x half> [[TMP2]]
539 //
540 float16x8_t test_vnegq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
541 {
542 #ifdef POLYMORPHIC
543     return vnegq_m(inactive, a, p);
544 #else /* POLYMORPHIC */
545     return vnegq_m_f16(inactive, a, p);
546 #endif /* POLYMORPHIC */
547 }
548 
549 // CHECK-LABEL: @test_vnegq_m_f32(
550 // CHECK-NEXT:  entry:
551 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
552 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
553 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.neg.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
554 // CHECK-NEXT:    ret <4 x float> [[TMP2]]
555 //
556 float32x4_t test_vnegq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
557 {
558 #ifdef POLYMORPHIC
559     return vnegq_m(inactive, a, p);
560 #else /* POLYMORPHIC */
561     return vnegq_m_f32(inactive, a, p);
562 #endif /* POLYMORPHIC */
563 }
564 
565 // CHECK-LABEL: @test_vnegq_m_s8(
566 // CHECK-NEXT:  entry:
567 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
568 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
569 // CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.neg.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
570 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
571 //
572 int8x16_t test_vnegq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
573 {
574 #ifdef POLYMORPHIC
575     return vnegq_m(inactive, a, p);
576 #else /* POLYMORPHIC */
577     return vnegq_m_s8(inactive, a, p);
578 #endif /* POLYMORPHIC */
579 }
580 
581 // CHECK-LABEL: @test_vnegq_m_s16(
582 // CHECK-NEXT:  entry:
583 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
584 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
585 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.neg.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
586 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
587 //
588 int16x8_t test_vnegq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
589 {
590 #ifdef POLYMORPHIC
591     return vnegq_m(inactive, a, p);
592 #else /* POLYMORPHIC */
593     return vnegq_m_s16(inactive, a, p);
594 #endif /* POLYMORPHIC */
595 }
596 
597 // CHECK-LABEL: @test_vnegq_m_s32(
598 // CHECK-NEXT:  entry:
599 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
600 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
601 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.neg.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
602 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
603 //
604 int32x4_t test_vnegq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
605 {
606 #ifdef POLYMORPHIC
607     return vnegq_m(inactive, a, p);
608 #else /* POLYMORPHIC */
609     return vnegq_m_s32(inactive, a, p);
610 #endif /* POLYMORPHIC */
611 }
612 
// vnegq_x ("don't-care" predicated negation): same as vnegq_m but the
// inactive operand of arm.mve.neg.predicated is undef.
613 // CHECK-LABEL: @test_vnegq_x_f16(
614 // CHECK-NEXT:  entry:
615 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
616 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
617 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.neg.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
618 // CHECK-NEXT:    ret <8 x half> [[TMP2]]
619 //
620 float16x8_t test_vnegq_x_f16(float16x8_t a, mve_pred16_t p)
621 {
622 #ifdef POLYMORPHIC
623     return vnegq_x(a, p);
624 #else /* POLYMORPHIC */
625     return vnegq_x_f16(a, p);
626 #endif /* POLYMORPHIC */
627 }
628 
629 // CHECK-LABEL: @test_vnegq_x_f32(
630 // CHECK-NEXT:  entry:
631 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
632 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
633 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.neg.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
634 // CHECK-NEXT:    ret <4 x float> [[TMP2]]
635 //
636 float32x4_t test_vnegq_x_f32(float32x4_t a, mve_pred16_t p)
637 {
638 #ifdef POLYMORPHIC
639     return vnegq_x(a, p);
640 #else /* POLYMORPHIC */
641     return vnegq_x_f32(a, p);
642 #endif /* POLYMORPHIC */
643 }
644 
645 // CHECK-LABEL: @test_vnegq_x_s8(
646 // CHECK-NEXT:  entry:
647 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
648 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
649 // CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.neg.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
650 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
651 //
652 int8x16_t test_vnegq_x_s8(int8x16_t a, mve_pred16_t p)
653 {
654 #ifdef POLYMORPHIC
655     return vnegq_x(a, p);
656 #else /* POLYMORPHIC */
657     return vnegq_x_s8(a, p);
658 #endif /* POLYMORPHIC */
659 }
660 
661 // CHECK-LABEL: @test_vnegq_x_s16(
662 // CHECK-NEXT:  entry:
663 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
664 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
665 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.neg.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
666 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
667 //
668 int16x8_t test_vnegq_x_s16(int16x8_t a, mve_pred16_t p)
669 {
670 #ifdef POLYMORPHIC
671     return vnegq_x(a, p);
672 #else /* POLYMORPHIC */
673     return vnegq_x_s16(a, p);
674 #endif /* POLYMORPHIC */
675 }
676 
677 // CHECK-LABEL: @test_vnegq_x_s32(
678 // CHECK-NEXT:  entry:
679 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
680 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
681 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.neg.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
682 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
683 //
684 int32x4_t test_vnegq_x_s32(int32x4_t a, mve_pred16_t p)
685 {
686 #ifdef POLYMORPHIC
687     return vnegq_x(a, p);
688 #else /* POLYMORPHIC */
689     return vnegq_x_s32(a, p);
690 #endif /* POLYMORPHIC */
691 }
692 
// vabsq_m (merging-predicated absolute value): predicate conversion via
// arm.mve.pred.i2v followed by arm.mve.abs.predicated, with 'inactive'
// supplying false-predicated lanes.
693 // CHECK-LABEL: @test_vabsq_m_f16(
694 // CHECK-NEXT:  entry:
695 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
696 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
697 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.abs.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
698 // CHECK-NEXT:    ret <8 x half> [[TMP2]]
699 //
700 float16x8_t test_vabsq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
701 {
702 #ifdef POLYMORPHIC
703     return vabsq_m(inactive, a, p);
704 #else /* POLYMORPHIC */
705     return vabsq_m_f16(inactive, a, p);
706 #endif /* POLYMORPHIC */
707 }
708 
709 // CHECK-LABEL: @test_vabsq_m_f32(
710 // CHECK-NEXT:  entry:
711 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
712 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
713 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.abs.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
714 // CHECK-NEXT:    ret <4 x float> [[TMP2]]
715 //
716 float32x4_t test_vabsq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
717 {
718 #ifdef POLYMORPHIC
719     return vabsq_m(inactive, a, p);
720 #else /* POLYMORPHIC */
721     return vabsq_m_f32(inactive, a, p);
722 #endif /* POLYMORPHIC */
723 }
724 
725 // CHECK-LABEL: @test_vabsq_m_s8(
726 // CHECK-NEXT:  entry:
727 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
728 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
729 // CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.abs.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
730 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
731 //
732 int8x16_t test_vabsq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
733 {
734 #ifdef POLYMORPHIC
735     return vabsq_m(inactive, a, p);
736 #else /* POLYMORPHIC */
737     return vabsq_m_s8(inactive, a, p);
738 #endif /* POLYMORPHIC */
739 }
740 
741 // CHECK-LABEL: @test_vabsq_m_s16(
742 // CHECK-NEXT:  entry:
743 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
744 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
745 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.abs.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
746 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
747 //
748 int16x8_t test_vabsq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
749 {
750 #ifdef POLYMORPHIC
751     return vabsq_m(inactive, a, p);
752 #else /* POLYMORPHIC */
753     return vabsq_m_s16(inactive, a, p);
754 #endif /* POLYMORPHIC */
755 }
756 
757 // CHECK-LABEL: @test_vabsq_m_s32(
758 // CHECK-NEXT:  entry:
759 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
760 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
761 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.abs.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
762 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
763 //
764 int32x4_t test_vabsq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
765 {
766 #ifdef POLYMORPHIC
767     return vabsq_m(inactive, a, p);
768 #else /* POLYMORPHIC */
769     return vabsq_m_s32(inactive, a, p);
770 #endif /* POLYMORPHIC */
771 }
772 
773 // CHECK-LABEL: @test_vabsq_x_f16(
774 // CHECK-NEXT:  entry:
775 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
776 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
777 // CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.abs.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
778 // CHECK-NEXT:    ret <8 x half> [[TMP2]]
779 //
780 float16x8_t test_vabsq_x_f16(float16x8_t a, mve_pred16_t p)
781 {
782 #ifdef POLYMORPHIC
783     return vabsq_x(a, p);
784 #else /* POLYMORPHIC */
785     return vabsq_x_f16(a, p);
786 #endif /* POLYMORPHIC */
787 }
788 
789 // CHECK-LABEL: @test_vabsq_x_f32(
790 // CHECK-NEXT:  entry:
791 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
792 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
793 // CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.abs.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
794 // CHECK-NEXT:    ret <4 x float> [[TMP2]]
795 //
796 float32x4_t test_vabsq_x_f32(float32x4_t a, mve_pred16_t p)
797 {
798 #ifdef POLYMORPHIC
799     return vabsq_x(a, p);
800 #else /* POLYMORPHIC */
801     return vabsq_x_f32(a, p);
802 #endif /* POLYMORPHIC */
803 }
804 
805 // CHECK-LABEL: @test_vabsq_x_s8(
806 // CHECK-NEXT:  entry:
807 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
808 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
809 // CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.abs.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
810 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
811 //
812 int8x16_t test_vabsq_x_s8(int8x16_t a, mve_pred16_t p)
813 {
814 #ifdef POLYMORPHIC
815     return vabsq_x(a, p);
816 #else /* POLYMORPHIC */
817     return vabsq_x_s8(a, p);
818 #endif /* POLYMORPHIC */
819 }
820 
// CHECK-LABEL: @test_vabsq_x_s16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.abs.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vabsq_x_s16(int16x8_t a, mve_pred16_t p)
{
    /* "_x" form: inactive lanes are undefined — the CHECK lines above pass
       undef as the predicated intrinsic's inactive operand. */
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_s16(a, p);
#endif /* POLYMORPHIC */
}
836 
// CHECK-LABEL: @test_vabsq_x_s32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.abs.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vabsq_x_s32(int32x4_t a, mve_pred16_t p)
{
    /* "_x" form: inactive lanes are undefined — the CHECK lines above pass
       undef as the predicated intrinsic's inactive operand. */
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_s32(a, p);
#endif /* POLYMORPHIC */
}
852 
// CHECK-LABEL: @test_vqnegq_m_s8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.qneg.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
int8x16_t test_vqnegq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
    /* "_m" (merging) form: predicated-off lanes come from 'inactive' — the
       CHECK lines above pass it as the intrinsic's third operand. */
#ifdef POLYMORPHIC
    return vqnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqnegq_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}
868 
// CHECK-LABEL: @test_vqnegq_m_s16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.qneg.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vqnegq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
    /* "_m" (merging) form: predicated-off lanes come from 'inactive' — the
       CHECK lines above pass it as the intrinsic's third operand. */
#ifdef POLYMORPHIC
    return vqnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqnegq_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}
884 
// CHECK-LABEL: @test_vqnegq_m_s32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.qneg.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vqnegq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
    /* "_m" (merging) form: predicated-off lanes come from 'inactive' — the
       CHECK lines above pass it as the intrinsic's third operand. */
#ifdef POLYMORPHIC
    return vqnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqnegq_m_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}
900 
// CHECK-LABEL: @test_vqabsq_m_s8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.qabs.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
int8x16_t test_vqabsq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
    /* "_m" (merging) form: predicated-off lanes come from 'inactive' — the
       CHECK lines above pass it as the intrinsic's third operand. */
#ifdef POLYMORPHIC
    return vqabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqabsq_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}
916 
// CHECK-LABEL: @test_vqabsq_m_s16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.qabs.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vqabsq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
    /* "_m" (merging) form: predicated-off lanes come from 'inactive' — the
       CHECK lines above pass it as the intrinsic's third operand. */
#ifdef POLYMORPHIC
    return vqabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqabsq_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}
932 
// CHECK-LABEL: @test_vqabsq_m_s32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.qabs.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vqabsq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
    /* "_m" (merging) form: predicated-off lanes come from 'inactive' — the
       CHECK lines above pass it as the intrinsic's third operand. */
#ifdef POLYMORPHIC
    return vqabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqabsq_m_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}
948 
949