Lines Matching +full:4 +full:d

1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
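; After a codegen change, these assertions can be regenerated by running the script
; named above on this test file from an LLVM checkout with a built llc available
; (for example: llvm/utils/update_llc_test_checks.py --llc-binary=<build>/bin/llc <this-test>.ll).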
9 ; CHECK-NEXT: mov z5.d, z2.d
10 ; CHECK-NEXT: mov z4.d, z1.d
12 ; CHECK-NEXT: mov z0.d, z4.d
13 ; CHECK-NEXT: mov z1.d, z5.d
22 ; CHECK-NEXT: mov z5.d, z2.d
23 ; CHECK-NEXT: mov z4.d, z1.d
25 ; CHECK-NEXT: mov z0.d, z4.d
26 ; CHECK-NEXT: mov z1.d, z5.d
32 define { <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_min_single_x2_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm) {
35 ; CHECK-NEXT: mov z5.d, z2.d
36 ; CHECK-NEXT: mov z4.d, z1.d
38 ; CHECK-NEXT: mov z0.d, z4.d
39 ; CHECK-NEXT: mov z1.d, z5.d
41 %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.smin.single.x2.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm)
42 ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
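; The mov chains around each min instruction in these checks come from register
; constraints: the SME2 multi-vector forms need their zdn inputs in a consecutive,
; suitably aligned register tuple (z4_z5 here), while %unused occupies z0 and %zdn1
; arrives in z1, and the two results must be returned in z0/z1, hence the copies in
; and back out.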
48 ; CHECK-NEXT: mov z5.d, z2.d
49 ; CHECK-NEXT: mov z4.d, z1.d
50 ; CHECK-NEXT: smin { z4.d, z5.d }, { z4.d, z5.d }, z3.d
51 ; CHECK-NEXT: mov z0.d, z4.d
52 ; CHECK-NEXT: mov z1.d, z5.d
63 ; CHECK-NEXT: mov z5.d, z2.d
64 ; CHECK-NEXT: mov z4.d, z1.d
66 ; CHECK-NEXT: mov z0.d, z4.d
67 ; CHECK-NEXT: mov z1.d, z5.d
76 ; CHECK-NEXT: mov z5.d, z2.d
77 ; CHECK-NEXT: mov z4.d, z1.d
79 ; CHECK-NEXT: mov z0.d, z4.d
80 ; CHECK-NEXT: mov z1.d, z5.d
86 define { <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_min_single_x2_u32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm) {
89 ; CHECK-NEXT: mov z5.d, z2.d
90 ; CHECK-NEXT: mov z4.d, z1.d
92 ; CHECK-NEXT: mov z0.d, z4.d
93 ; CHECK-NEXT: mov z1.d, z5.d
95 %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.umin.single.x2.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm)
96 ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
102 ; CHECK-NEXT: mov z5.d, z2.d
103 ; CHECK-NEXT: mov z4.d, z1.d
104 ; CHECK-NEXT: umin { z4.d, z5.d }, { z4.d, z5.d }, z3.d
105 ; CHECK-NEXT: mov z0.d, z4.d
106 ; CHECK-NEXT: mov z1.d, z5.d
130 ; CHECK-NEXT: mov z5.d, z2.d
131 ; CHECK-NEXT: mov z4.d, z1.d
133 ; CHECK-NEXT: mov z0.d, z4.d
134 ; CHECK-NEXT: mov z1.d, z5.d
140 define { <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_min_single_x2_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm) {
143 ; CHECK-NEXT: mov z5.d, z2.d
144 ; CHECK-NEXT: mov z4.d, z1.d
146 ; CHECK-NEXT: mov z0.d, z4.d
147 ; CHECK-NEXT: mov z1.d, z5.d
149 %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fmin.single.x2.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm)
150 ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
156 ; CHECK-NEXT: mov z5.d, z2.d
157 ; CHECK-NEXT: mov z4.d, z1.d
158 ; CHECK-NEXT: fmin { z4.d, z5.d }, { z4.d, z5.d }, z3.d
159 ; CHECK-NEXT: mov z0.d, z4.d
160 ; CHECK-NEXT: mov z1.d, z5.d
171 ; CHECK-NEXT: mov z27.d, z4.d
172 ; CHECK-NEXT: mov z26.d, z3.d
173 ; CHECK-NEXT: mov z25.d, z2.d
174 ; CHECK-NEXT: mov z24.d, z1.d
176 ; CHECK-NEXT: mov z0.d, z24.d
177 ; CHECK-NEXT: mov z1.d, z25.d
178 ; CHECK-NEXT: mov z2.d, z26.d
179 ; CHECK-NEXT: mov z3.d, z27.d
189 ; CHECK-NEXT: mov z27.d, z4.d
190 ; CHECK-NEXT: mov z26.d, z3.d
191 ; CHECK-NEXT: mov z25.d, z2.d
192 ; CHECK-NEXT: mov z24.d, z1.d
194 ; CHECK-NEXT: mov z0.d, z24.d
195 ; CHECK-NEXT: mov z1.d, z25.d
196 ; CHECK-NEXT: mov z2.d, z26.d
197 ; CHECK-NEXT: mov z3.d, z27.d
204 define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_min_single_x4_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4, <vscale x 4 x i32> %zm) {
207 ; CHECK-NEXT: mov z27.d, z4.d
208 ; CHECK-NEXT: mov z26.d, z3.d
209 ; CHECK-NEXT: mov z25.d, z2.d
210 ; CHECK-NEXT: mov z24.d, z1.d
212 ; CHECK-NEXT: mov z0.d, z24.d
213 ; CHECK-NEXT: mov z1.d, z25.d
214 ; CHECK-NEXT: mov z2.d, z26.d
215 ; CHECK-NEXT: mov z3.d, z27.d
217 %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }
218 @llvm.aarch64.sve.smin.single.x4.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4, <vscale x 4 x i32> %zm)
219 ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
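; For the x4 forms the four zdn inputs must additionally form a consecutive quad
; whose first register number is a multiple of four (z24-z27 here, with z28-z31
; used for the second tuple in the multi-operand variants), which is why four movs
; set up the inputs and four more copy the results back to z0-z3.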
225 ; CHECK-NEXT: mov z27.d, z4.d
226 ; CHECK-NEXT: mov z26.d, z3.d
227 ; CHECK-NEXT: mov z25.d, z2.d
228 ; CHECK-NEXT: mov z24.d, z1.d
229 ; CHECK-NEXT: smin { z24.d - z27.d }, { z24.d - z27.d }, z5.d
230 ; CHECK-NEXT: mov z0.d, z24.d
231 ; CHECK-NEXT: mov z1.d, z25.d
232 ; CHECK-NEXT: mov z2.d, z26.d
233 ; CHECK-NEXT: mov z3.d, z27.d
245 ; CHECK-NEXT: mov z27.d, z4.d
246 ; CHECK-NEXT: mov z26.d, z3.d
247 ; CHECK-NEXT: mov z25.d, z2.d
248 ; CHECK-NEXT: mov z24.d, z1.d
250 ; CHECK-NEXT: mov z0.d, z24.d
251 ; CHECK-NEXT: mov z1.d, z25.d
252 ; CHECK-NEXT: mov z2.d, z26.d
253 ; CHECK-NEXT: mov z3.d, z27.d
263 ; CHECK-NEXT: mov z27.d, z4.d
264 ; CHECK-NEXT: mov z26.d, z3.d
265 ; CHECK-NEXT: mov z25.d, z2.d
266 ; CHECK-NEXT: mov z24.d, z1.d
268 ; CHECK-NEXT: mov z0.d, z24.d
269 ; CHECK-NEXT: mov z1.d, z25.d
270 ; CHECK-NEXT: mov z2.d, z26.d
271 ; CHECK-NEXT: mov z3.d, z27.d
278 define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_min_single_x4_u32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4, <vscale x 4 x i32> %zm) {
281 ; CHECK-NEXT: mov z27.d, z4.d
282 ; CHECK-NEXT: mov z26.d, z3.d
283 ; CHECK-NEXT: mov z25.d, z2.d
284 ; CHECK-NEXT: mov z24.d, z1.d
286 ; CHECK-NEXT: mov z0.d, z24.d
287 ; CHECK-NEXT: mov z1.d, z25.d
288 ; CHECK-NEXT: mov z2.d, z26.d
289 ; CHECK-NEXT: mov z3.d, z27.d
291 %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }
292 @llvm.aarch64.sve.umin.single.x4.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4, <vscale x 4 x i32> %zm)
293 ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
299 ; CHECK-NEXT: mov z27.d, z4.d
300 ; CHECK-NEXT: mov z26.d, z3.d
301 ; CHECK-NEXT: mov z25.d, z2.d
302 ; CHECK-NEXT: mov z24.d, z1.d
303 ; CHECK-NEXT: umin { z24.d - z27.d }, { z24.d - z27.d }, z5.d
304 ; CHECK-NEXT: mov z0.d, z24.d
305 ; CHECK-NEXT: mov z1.d, z25.d
306 ; CHECK-NEXT: mov z2.d, z26.d
307 ; CHECK-NEXT: mov z3.d, z27.d
334 ; CHECK-NEXT: mov z27.d, z4.d
335 ; CHECK-NEXT: mov z26.d, z3.d
336 ; CHECK-NEXT: mov z25.d, z2.d
337 ; CHECK-NEXT: mov z24.d, z1.d
339 ; CHECK-NEXT: mov z0.d, z24.d
340 ; CHECK-NEXT: mov z1.d, z25.d
341 ; CHECK-NEXT: mov z2.d, z26.d
342 ; CHECK-NEXT: mov z3.d, z27.d
349 define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_min_single_x4_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm) {
352 ; CHECK-NEXT: mov z27.d, z4.d
353 ; CHECK-NEXT: mov z26.d, z3.d
354 ; CHECK-NEXT: mov z25.d, z2.d
355 ; CHECK-NEXT: mov z24.d, z1.d
357 ; CHECK-NEXT: mov z0.d, z24.d
358 ; CHECK-NEXT: mov z1.d, z25.d
359 ; CHECK-NEXT: mov z2.d, z26.d
360 ; CHECK-NEXT: mov z3.d, z27.d
362 %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
363 @llvm.aarch64.sve.fmin.single.x4.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm)
364 ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
370 ; CHECK-NEXT: mov z27.d, z4.d
371 ; CHECK-NEXT: mov z26.d, z3.d
372 ; CHECK-NEXT: mov z25.d, z2.d
373 ; CHECK-NEXT: mov z24.d, z1.d
374 ; CHECK-NEXT: fmin { z24.d - z27.d }, { z24.d - z27.d }, z5.d
375 ; CHECK-NEXT: mov z0.d, z24.d
376 ; CHECK-NEXT: mov z1.d, z25.d
377 ; CHECK-NEXT: mov z2.d, z26.d
378 ; CHECK-NEXT: mov z3.d, z27.d
390 ; CHECK-NEXT: mov z7.d, z4.d
391 ; CHECK-NEXT: mov z5.d, z2.d
392 ; CHECK-NEXT: mov z6.d, z3.d
393 ; CHECK-NEXT: mov z4.d, z1.d
395 ; CHECK-NEXT: mov z0.d, z4.d
396 ; CHECK-NEXT: mov z1.d, z5.d
405 ; CHECK-NEXT: mov z7.d, z4.d
406 ; CHECK-NEXT: mov z5.d, z2.d
407 ; CHECK-NEXT: mov z6.d, z3.d
408 ; CHECK-NEXT: mov z4.d, z1.d
410 ; CHECK-NEXT: mov z0.d, z4.d
411 ; CHECK-NEXT: mov z1.d, z5.d
417 define { <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_min_multi_x2_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) {
420 ; CHECK-NEXT: mov z7.d, z4.d
421 ; CHECK-NEXT: mov z5.d, z2.d
422 ; CHECK-NEXT: mov z6.d, z3.d
423 ; CHECK-NEXT: mov z4.d, z1.d
425 ; CHECK-NEXT: mov z0.d, z4.d
426 ; CHECK-NEXT: mov z1.d, z5.d
428 %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.smin.x2.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
429 ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
435 ; CHECK-NEXT: mov z7.d, z4.d
436 ; CHECK-NEXT: mov z5.d, z2.d
437 ; CHECK-NEXT: mov z6.d, z3.d
438 ; CHECK-NEXT: mov z4.d, z1.d
439 ; CHECK-NEXT: smin { z4.d, z5.d }, { z4.d, z5.d }, { z6.d, z7.d }
440 ; CHECK-NEXT: mov z0.d, z4.d
441 ; CHECK-NEXT: mov z1.d, z5.d
452 ; CHECK-NEXT: mov z7.d, z4.d
453 ; CHECK-NEXT: mov z5.d, z2.d
454 ; CHECK-NEXT: mov z6.d, z3.d
455 ; CHECK-NEXT: mov z4.d, z1.d
457 ; CHECK-NEXT: mov z0.d, z4.d
458 ; CHECK-NEXT: mov z1.d, z5.d
467 ; CHECK-NEXT: mov z7.d, z4.d
468 ; CHECK-NEXT: mov z5.d, z2.d
469 ; CHECK-NEXT: mov z6.d, z3.d
470 ; CHECK-NEXT: mov z4.d, z1.d
472 ; CHECK-NEXT: mov z0.d, z4.d
473 ; CHECK-NEXT: mov z1.d, z5.d
479 define { <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_min_multi_x2_u32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) {
482 ; CHECK-NEXT: mov z7.d, z4.d
483 ; CHECK-NEXT: mov z5.d, z2.d
484 ; CHECK-NEXT: mov z6.d, z3.d
485 ; CHECK-NEXT: mov z4.d, z1.d
487 ; CHECK-NEXT: mov z0.d, z4.d
488 ; CHECK-NEXT: mov z1.d, z5.d
490 %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.umin.x2.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
491 ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
497 ; CHECK-NEXT: mov z7.d, z4.d
498 ; CHECK-NEXT: mov z5.d, z2.d
499 ; CHECK-NEXT: mov z6.d, z3.d
500 ; CHECK-NEXT: mov z4.d, z1.d
501 ; CHECK-NEXT: umin { z4.d, z5.d }, { z4.d, z5.d }, { z6.d, z7.d }
502 ; CHECK-NEXT: mov z0.d, z4.d
503 ; CHECK-NEXT: mov z1.d, z5.d
529 ; CHECK-NEXT: mov z7.d, z4.d
530 ; CHECK-NEXT: mov z5.d, z2.d
531 ; CHECK-NEXT: mov z6.d, z3.d
532 ; CHECK-NEXT: mov z4.d, z1.d
534 ; CHECK-NEXT: mov z0.d, z4.d
535 ; CHECK-NEXT: mov z1.d, z5.d
541 define { <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_min_multi_x2_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2) {
544 ; CHECK-NEXT: mov z7.d, z4.d
545 ; CHECK-NEXT: mov z5.d, z2.d
546 ; CHECK-NEXT: mov z6.d, z3.d
547 ; CHECK-NEXT: mov z4.d, z1.d
549 ; CHECK-NEXT: mov z0.d, z4.d
550 ; CHECK-NEXT: mov z1.d, z5.d
552 %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fmin.x2.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
553 ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
559 ; CHECK-NEXT: mov z7.d, z4.d
560 ; CHECK-NEXT: mov z5.d, z2.d
561 ; CHECK-NEXT: mov z6.d, z3.d
562 ; CHECK-NEXT: mov z4.d, z1.d
563 ; CHECK-NEXT: fmin { z4.d, z5.d }, { z4.d, z5.d }, { z6.d, z7.d }
564 ; CHECK-NEXT: mov z0.d, z4.d
565 ; CHECK-NEXT: mov z1.d, z5.d
576 ; CHECK-NEXT: mov z30.d, z7.d
577 ; CHECK-NEXT: mov z27.d, z4.d
579 ; CHECK-NEXT: mov z29.d, z6.d
580 ; CHECK-NEXT: mov z26.d, z3.d
581 ; CHECK-NEXT: mov z28.d, z5.d
582 ; CHECK-NEXT: mov z25.d, z2.d
584 ; CHECK-NEXT: mov z24.d, z1.d
586 ; CHECK-NEXT: mov z0.d, z24.d
587 ; CHECK-NEXT: mov z1.d, z25.d
588 ; CHECK-NEXT: mov z2.d, z26.d
589 ; CHECK-NEXT: mov z3.d, z27.d
601 ; CHECK-NEXT: mov z30.d, z7.d
602 ; CHECK-NEXT: mov z27.d, z4.d
604 ; CHECK-NEXT: mov z29.d, z6.d
605 ; CHECK-NEXT: mov z26.d, z3.d
606 ; CHECK-NEXT: mov z28.d, z5.d
607 ; CHECK-NEXT: mov z25.d, z2.d
609 ; CHECK-NEXT: mov z24.d, z1.d
611 ; CHECK-NEXT: mov z0.d, z24.d
612 ; CHECK-NEXT: mov z1.d, z25.d
613 ; CHECK-NEXT: mov z2.d, z26.d
614 ; CHECK-NEXT: mov z3.d, z27.d
623 define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_min_multi_x4_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4,
626 ; CHECK-NEXT: mov z30.d, z7.d
627 ; CHECK-NEXT: mov z27.d, z4.d
629 ; CHECK-NEXT: mov z29.d, z6.d
630 ; CHECK-NEXT: mov z26.d, z3.d
631 ; CHECK-NEXT: mov z28.d, z5.d
632 ; CHECK-NEXT: mov z25.d, z2.d
634 ; CHECK-NEXT: mov z24.d, z1.d
636 ; CHECK-NEXT: mov z0.d, z24.d
637 ; CHECK-NEXT: mov z1.d, z25.d
638 ; CHECK-NEXT: mov z2.d, z26.d
639 ; CHECK-NEXT: mov z3.d, z27.d
641 <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3, <vscale x 4 x i32> %zm4) {
642 %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }
643 @llvm.aarch64.sve.smin.x4.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4,
644 <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3, <vscale x 4 x i32> %zm4)
645 ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
651 ; CHECK-NEXT: mov z30.d, z7.d
652 ; CHECK-NEXT: mov z27.d, z4.d
653 ; CHECK-NEXT: ptrue p0.d
654 ; CHECK-NEXT: mov z29.d, z6.d
655 ; CHECK-NEXT: mov z26.d, z3.d
656 ; CHECK-NEXT: mov z28.d, z5.d
657 ; CHECK-NEXT: mov z25.d, z2.d
658 ; CHECK-NEXT: ld1d { z31.d }, p0/z, [x0]
659 ; CHECK-NEXT: mov z24.d, z1.d
660 ; CHECK-NEXT: smin { z24.d - z27.d }, { z24.d - z27.d }, { z28.d - z31.d }
661 ; CHECK-NEXT: mov z0.d, z24.d
662 ; CHECK-NEXT: mov z1.d, z25.d
663 ; CHECK-NEXT: mov z2.d, z26.d
664 ; CHECK-NEXT: mov z3.d, z27.d
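; This x4 multi-operand variant takes nine scalable-vector arguments, but only
; z0-z7 are available for passing them, so the final %zm4 operand is passed
; indirectly and reloaded here with ld1d from the address in x0, filling z31 to
; complete the z28-z31 tuple.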
678 ; CHECK-NEXT: mov z30.d, z7.d
679 ; CHECK-NEXT: mov z27.d, z4.d
681 ; CHECK-NEXT: mov z29.d, z6.d
682 ; CHECK-NEXT: mov z26.d, z3.d
683 ; CHECK-NEXT: mov z28.d, z5.d
684 ; CHECK-NEXT: mov z25.d, z2.d
686 ; CHECK-NEXT: mov z24.d, z1.d
688 ; CHECK-NEXT: mov z0.d, z24.d
689 ; CHECK-NEXT: mov z1.d, z25.d
690 ; CHECK-NEXT: mov z2.d, z26.d
691 ; CHECK-NEXT: mov z3.d, z27.d
703 ; CHECK-NEXT: mov z30.d, z7.d
704 ; CHECK-NEXT: mov z27.d, z4.d
706 ; CHECK-NEXT: mov z29.d, z6.d
707 ; CHECK-NEXT: mov z26.d, z3.d
708 ; CHECK-NEXT: mov z28.d, z5.d
709 ; CHECK-NEXT: mov z25.d, z2.d
711 ; CHECK-NEXT: mov z24.d, z1.d
713 ; CHECK-NEXT: mov z0.d, z24.d
714 ; CHECK-NEXT: mov z1.d, z25.d
715 ; CHECK-NEXT: mov z2.d, z26.d
716 ; CHECK-NEXT: mov z3.d, z27.d
725 define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_min_multi_x4_u32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4,
728 ; CHECK-NEXT: mov z30.d, z7.d
729 ; CHECK-NEXT: mov z27.d, z4.d
731 ; CHECK-NEXT: mov z29.d, z6.d
732 ; CHECK-NEXT: mov z26.d, z3.d
733 ; CHECK-NEXT: mov z28.d, z5.d
734 ; CHECK-NEXT: mov z25.d, z2.d
736 ; CHECK-NEXT: mov z24.d, z1.d
738 ; CHECK-NEXT: mov z0.d, z24.d
739 ; CHECK-NEXT: mov z1.d, z25.d
740 ; CHECK-NEXT: mov z2.d, z26.d
741 ; CHECK-NEXT: mov z3.d, z27.d
743 <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3, <vscale x 4 x i32> %zm4) {
744 %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }
745 @llvm.aarch64.sve.umin.x4.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4,
746 <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3, <vscale x 4 x i32> %zm4)
747 ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
753 ; CHECK-NEXT: mov z30.d, z7.d
754 ; CHECK-NEXT: mov z27.d, z4.d
755 ; CHECK-NEXT: ptrue p0.d
756 ; CHECK-NEXT: mov z29.d, z6.d
757 ; CHECK-NEXT: mov z26.d, z3.d
758 ; CHECK-NEXT: mov z28.d, z5.d
759 ; CHECK-NEXT: mov z25.d, z2.d
760 ; CHECK-NEXT: ld1d { z31.d }, p0/z, [x0]
761 ; CHECK-NEXT: mov z24.d, z1.d
762 ; CHECK-NEXT: umin { z24.d - z27.d }, { z24.d - z27.d }, { z28.d - z31.d }
763 ; CHECK-NEXT: mov z0.d, z24.d
764 ; CHECK-NEXT: mov z1.d, z25.d
765 ; CHECK-NEXT: mov z2.d, z26.d
766 ; CHECK-NEXT: mov z3.d, z27.d
800 ; CHECK-NEXT: mov z30.d, z7.d
801 ; CHECK-NEXT: mov z27.d, z4.d
803 ; CHECK-NEXT: mov z29.d, z6.d
804 ; CHECK-NEXT: mov z26.d, z3.d
805 ; CHECK-NEXT: mov z28.d, z5.d
806 ; CHECK-NEXT: mov z25.d, z2.d
808 ; CHECK-NEXT: mov z24.d, z1.d
810 ; CHECK-NEXT: mov z0.d, z24.d
811 ; CHECK-NEXT: mov z1.d, z25.d
812 ; CHECK-NEXT: mov z2.d, z26.d
813 ; CHECK-NEXT: mov z3.d, z27.d
822 define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_min_multi_x4_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4,
825 ; CHECK-NEXT: mov z30.d, z7.d
826 ; CHECK-NEXT: mov z27.d, z4.d
828 ; CHECK-NEXT: mov z29.d, z6.d
829 ; CHECK-NEXT: mov z26.d, z3.d
830 ; CHECK-NEXT: mov z28.d, z5.d
831 ; CHECK-NEXT: mov z25.d, z2.d
833 ; CHECK-NEXT: mov z24.d, z1.d
835 ; CHECK-NEXT: mov z0.d, z24.d
836 ; CHECK-NEXT: mov z1.d, z25.d
837 ; CHECK-NEXT: mov z2.d, z26.d
838 ; CHECK-NEXT: mov z3.d, z27.d
840 <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4) {
841 %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
842 @llvm.aarch64.sve.fmin.x4.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4,
843 <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4)
844 ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
850 ; CHECK-NEXT: mov z30.d, z7.d
851 ; CHECK-NEXT: mov z27.d, z4.d
852 ; CHECK-NEXT: ptrue p0.d
853 ; CHECK-NEXT: mov z29.d, z6.d
854 ; CHECK-NEXT: mov z26.d, z3.d
855 ; CHECK-NEXT: mov z28.d, z5.d
856 ; CHECK-NEXT: mov z25.d, z2.d
857 ; CHECK-NEXT: ld1d { z31.d }, p0/z, [x0]
858 ; CHECK-NEXT: mov z24.d, z1.d
859 ; CHECK-NEXT: fmin { z24.d - z27.d }, { z24.d - z27.d }, { z28.d - z31.d }
860 ; CHECK-NEXT: mov z0.d, z24.d
861 ; CHECK-NEXT: mov z1.d, z25.d
862 ; CHECK-NEXT: mov z2.d, z26.d
863 ; CHECK-NEXT: mov z3.d, z27.d
890 ; CHECK-NEXT: mov z5.d, z2.d
891 ; CHECK-NEXT: mov z4.d, z1.d
893 ; CHECK-NEXT: mov z0.d, z4.d
894 ; CHECK-NEXT: mov z1.d, z5.d
900 define { <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_minnm_single_x2_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm) {
903 ; CHECK-NEXT: mov z5.d, z2.d
904 ; CHECK-NEXT: mov z4.d, z1.d
906 ; CHECK-NEXT: mov z0.d, z4.d
907 ; CHECK-NEXT: mov z1.d, z5.d
909 %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fminnm.single.x2.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm)
910 ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
916 ; CHECK-NEXT: mov z5.d, z2.d
917 ; CHECK-NEXT: mov z4.d, z1.d
918 ; CHECK-NEXT: fminnm { z4.d, z5.d }, { z4.d, z5.d }, z3.d
919 ; CHECK-NEXT: mov z0.d, z4.d
920 ; CHECK-NEXT: mov z1.d, z5.d
946 ; CHECK-NEXT: mov z27.d, z4.d
947 ; CHECK-NEXT: mov z26.d, z3.d
948 ; CHECK-NEXT: mov z25.d, z2.d
949 ; CHECK-NEXT: mov z24.d, z1.d
951 ; CHECK-NEXT: mov z0.d, z24.d
952 ; CHECK-NEXT: mov z1.d, z25.d
953 ; CHECK-NEXT: mov z2.d, z26.d
954 ; CHECK-NEXT: mov z3.d, z27.d
961 define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_minnm_single_x4_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm) {
964 ; CHECK-NEXT: mov z27.d, z4.d
965 ; CHECK-NEXT: mov z26.d, z3.d
966 ; CHECK-NEXT: mov z25.d, z2.d
967 ; CHECK-NEXT: mov z24.d, z1.d
969 ; CHECK-NEXT: mov z0.d, z24.d
970 ; CHECK-NEXT: mov z1.d, z25.d
971 ; CHECK-NEXT: mov z2.d, z26.d
972 ; CHECK-NEXT: mov z3.d, z27.d
974 %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
975 @llvm.aarch64.sve.fminnm.single.x4.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm)
976 ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
982 ; CHECK-NEXT: mov z27.d, z4.d
983 ; CHECK-NEXT: mov z26.d, z3.d
984 ; CHECK-NEXT: mov z25.d, z2.d
985 ; CHECK-NEXT: mov z24.d, z1.d
986 ; CHECK-NEXT: fminnm { z24.d - z27.d }, { z24.d - z27.d }, z5.d
987 ; CHECK-NEXT: mov z0.d, z24.d
988 ; CHECK-NEXT: mov z1.d, z25.d
989 ; CHECK-NEXT: mov z2.d, z26.d
990 ; CHECK-NEXT: mov z3.d, z27.d
1017 ; CHECK-NEXT: mov z7.d, z4.d
1018 ; CHECK-NEXT: mov z5.d, z2.d
1019 ; CHECK-NEXT: mov z6.d, z3.d
1020 ; CHECK-NEXT: mov z4.d, z1.d
1022 ; CHECK-NEXT: mov z0.d, z4.d
1023 ; CHECK-NEXT: mov z1.d, z5.d
1029 define { <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_minnm_x2_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2) {
1032 ; CHECK-NEXT: mov z7.d, z4.d
1033 ; CHECK-NEXT: mov z5.d, z2.d
1034 ; CHECK-NEXT: mov z6.d, z3.d
1035 ; CHECK-NEXT: mov z4.d, z1.d
1037 ; CHECK-NEXT: mov z0.d, z4.d
1038 ; CHECK-NEXT: mov z1.d, z5.d
1040 %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fminnm.x2.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
1041 ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
1047 ; CHECK-NEXT: mov z7.d, z4.d
1048 ; CHECK-NEXT: mov z5.d, z2.d
1049 ; CHECK-NEXT: mov z6.d, z3.d
1050 ; CHECK-NEXT: mov z4.d, z1.d
1051 ; CHECK-NEXT: fminnm { z4.d, z5.d }, { z4.d, z5.d }, { z6.d, z7.d }
1052 ; CHECK-NEXT: mov z0.d, z4.d
1053 ; CHECK-NEXT: mov z1.d, z5.d
1083 ; CHECK-NEXT: mov z30.d, z7.d
1084 ; CHECK-NEXT: mov z27.d, z4.d
1086 ; CHECK-NEXT: mov z29.d, z6.d
1087 ; CHECK-NEXT: mov z26.d, z3.d
1088 ; CHECK-NEXT: mov z28.d, z5.d
1089 ; CHECK-NEXT: mov z25.d, z2.d
1091 ; CHECK-NEXT: mov z24.d, z1.d
1093 ; CHECK-NEXT: mov z0.d, z24.d
1094 ; CHECK-NEXT: mov z1.d, z25.d
1095 ; CHECK-NEXT: mov z2.d, z26.d
1096 ; CHECK-NEXT: mov z3.d, z27.d
1104 define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_minnm_x4_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4) {
1107 ; CHECK-NEXT: mov z30.d, z7.d
1108 ; CHECK-NEXT: mov z27.d, z4.d
1110 ; CHECK-NEXT: mov z29.d, z6.d
1111 ; CHECK-NEXT: mov z26.d, z3.d
1112 ; CHECK-NEXT: mov z28.d, z5.d
1113 ; CHECK-NEXT: mov z25.d, z2.d
1115 ; CHECK-NEXT: mov z24.d, z1.d
1117 ; CHECK-NEXT: mov z0.d, z24.d
1118 ; CHECK-NEXT: mov z1.d, z25.d
1119 ; CHECK-NEXT: mov z2.d, z26.d
1120 ; CHECK-NEXT: mov z3.d, z27.d
1122 %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
1123 @llvm.aarch64.sve.fminnm.x4.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4,
1124 <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4)
1125 ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
1131 ; CHECK-NEXT: mov z30.d, z7.d
1132 ; CHECK-NEXT: mov z27.d, z4.d
1133 ; CHECK-NEXT: ptrue p0.d
1134 ; CHECK-NEXT: mov z29.d, z6.d
1135 ; CHECK-NEXT: mov z26.d, z3.d
1136 ; CHECK-NEXT: mov z28.d, z5.d
1137 ; CHECK-NEXT: mov z25.d, z2.d
1138 ; CHECK-NEXT: ld1d { z31.d }, p0/z, [x0]
1139 ; CHECK-NEXT: mov z24.d, z1.d
1140 ; CHECK-NEXT: fminnm { z24.d - z27.d }, { z24.d - z27.d }, { z28.d - z31.d }
1141 ; CHECK-NEXT: mov z0.d, z24.d
1142 ; CHECK-NEXT: mov z1.d, z25.d
1143 ; CHECK-NEXT: mov z2.d, z26.d
1144 ; CHECK-NEXT: mov z3.d, z27.d
1154 declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.smin.single.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1159 declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.umin.single.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1163 declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fmin.single.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1168 declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.smin.single.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1173 declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.umin.single.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1178 declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
1179 @llvm.aarch64.sve.fmin.single.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1185 declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.smin.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1190 declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.umin.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1194 declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fmin.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1201 declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }
1202 @llvm.aarch64.sve.smin.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1210 declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }
1211 @llvm.aarch64.sve.umin.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1217 declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
1218 @llvm.aarch64.sve.fmin.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1223 declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fminnm.single.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1228 declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
1229 @llvm.aarch64.sve.fminnm.single.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1234 declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fminnm.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1239 declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
1240 @llvm.aarch64.sve.fminnm.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
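; A minimal sketch (hypothetical function name, not part of the test above) of how a
; caller would consume the two-element aggregate returned by one of the declared x2
; intrinsics, splitting it with extractvalue rather than returning it whole:
define <vscale x 4 x i32> @smin_x2_first_result(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm) {
  ; Call the declared two-vector signed-min intrinsic.
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.smin.single.x2.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm)
  ; Pull out the first result vector; index 1 would give the second.
  %lo = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 0
  ret <vscale x 4 x i32> %lo
}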