
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 \
; RUN: -mattr=+v,+zvfhmin,+zvfbfmin -riscv-v-slp-max-vf=0 -S \
; RUN: | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 \
; RUN: -mattr=+v,+zvfh,+zvfbfmin -riscv-v-slp-max-vf=0 -S \
; RUN: | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVL128
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 \
; RUN: -mattr=+v,+zvl256b,+zvfh,+zvfbfmin -riscv-v-slp-max-vf=0 -S \
; RUN: | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVL256
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 \
; RUN: -mattr=+v,+zvl512b,+zvfh,+zvfbfmin -riscv-v-slp-max-vf=0 -S \
; RUN: | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVL512
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
; The first batch of tests covers simple reductions of various widths
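; A minimal sketch (illustrative only, not the exact test body; the original
; function bodies are elided from this listing) of the scalar shape these
; tests start from: consecutive loads combined by an add chain, which the
; SLP vectorizer is expected to rewrite into a single vector load plus
; @llvm.vector.reduce.add, as the checks below show.
define i64 @red_ld_4xi64_sketch(ptr %ptr) {
entry:
  %ld0 = load i64, ptr %ptr, align 8
  %gep1 = getelementptr inbounds i64, ptr %ptr, i64 1
  %ld1 = load i64, ptr %gep1, align 8
  %gep2 = getelementptr inbounds i64, ptr %ptr, i64 2
  %ld2 = load i64, ptr %gep2, align 8
  %gep3 = getelementptr inbounds i64, ptr %ptr, i64 3
  %ld3 = load i64, ptr %gep3, align 8
  %add0 = add i64 %ld0, %ld1
  %add1 = add i64 %add0, %ld2
  %add2 = add i64 %add1, %ld3
  ret i64 %add2
}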
; CHECK-LABEL: @red_ld_2xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LD0:%.*]] = load i64, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 1
; CHECK-NEXT: [[LD1:%.*]] = load i64, ptr [[GEP]], align 8
; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i64 [[LD0]], [[LD1]]
; CHECK-NEXT: ret i64 [[ADD_1]]
; CHECK-LABEL: @red_ld_4xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP0]])
; CHECK-NEXT: ret i64 [[TMP1]]
; CHECK-LABEL: @red_ld_8xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP0]])
; CHECK-NEXT: ret i64 [[TMP1]]
; CHECK-LABEL: @red_ld_16xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i64>, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP0]])
; CHECK-NEXT: ret i64 [[TMP1]]
; CHECK-LABEL: @red_strided_ld_16xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call <16 x i64> @llvm.experimental.vp.strided.load.v16i64.p0.i64(ptr align 8 [[PTR:%.*]], i64 16, <16 x i1> splat (i1 true), i32 16)
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP0]])
; CHECK-NEXT: ret i64 [[TMP1]]
; The next batch tests different reduction kinds
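; A minimal sketch (illustrative only, not the exact test body) of a
; reduction of another kind: a scalar i8 and-chain over consecutive loads,
; the shape the vectorizer turns into @llvm.vector.reduce.and as checked
; in @reduce_and below.
define i8 @reduce_and_sketch(ptr %p) {
entry:
  %l0 = load i8, ptr %p, align 1
  %g1 = getelementptr inbounds i8, ptr %p, i64 1
  %l1 = load i8, ptr %g1, align 1
  %g2 = getelementptr inbounds i8, ptr %p, i64 2
  %l2 = load i8, ptr %g2, align 1
  %g3 = getelementptr inbounds i8, ptr %p, i64 3
  %l3 = load i8, ptr %g3, align 1
  %a0 = and i8 %l0, %l1
  %a1 = and i8 %a0, %l2
  %a2 = and i8 %a1, %l3
  ret i8 %a2
}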
; CHECK-LABEL: @reduce_and(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: [[OP_RDX:%.*]] = and i8 [[TMP3]], 1
; CHECK-NEXT: ret i8 [[OP_RDX]]
; CHECK-LABEL: @reduce_or_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[TMP3]]
; ZVFHMIN-LABEL: @reduce_or_2(
; ZVFHMIN-NEXT: [[TMP1:%.*]] = shl i64 0, 0
; ZVFHMIN-NEXT: [[TMP2:%.*]] = insertelement <16 x i64> <i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 poison>, i64 [[TMP1]], i32 15
; ZVFHMIN-NEXT: [[TMP3:%.*]] = icmp ult <16 x i64> [[TMP2]], zeroinitializer
; ZVFHMIN-NEXT: [[TMP4:%.*]] = insertelement <16 x i64> <i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 poison, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0>, i64 [[TMP1]], i32 6
; ZVFHMIN-NEXT: [[TMP5:%.*]] = icmp ult <16 x i64> [[TMP4]], zeroinitializer
; ZVFHMIN-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP3]])
; ZVFHMIN-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP5]])
; ZVFHMIN-NEXT: [[OP_RDX:%.*]] = or i1 [[TMP6]], [[TMP7]]
; ZVFHMIN-NEXT: br i1 [[OP_RDX]], label [[TMP9:%.*]], label [[TMP8:%.*]]
; ZVFHMIN-NEXT: ret void
; ZVFHMIN-NEXT: ret void
; ZVL128-LABEL: @reduce_or_2(
; ZVL128-NEXT: [[TMP1:%.*]] = shl i64 0, 0
; ZVL128-NEXT: [[TMP2:%.*]] = insertelement <16 x i64> <i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 poison>, i64 [[TMP1]], i32 15
; ZVL128-NEXT: [[TMP3:%.*]] = icmp ult <16 x i64> [[TMP2]], zeroinitializer
; ZVL128-NEXT: [[TMP4:%.*]] = insertelement <16 x i64> <i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 poison, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0>, i64 [[TMP1]], i32 6
; ZVL128-NEXT: [[TMP5:%.*]] = icmp ult <16 x i64> [[TMP4]], zeroinitializer
; ZVL128-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP3]])
; ZVL128-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP5]])
; ZVL128-NEXT: [[OP_RDX:%.*]] = or i1 [[TMP6]], [[TMP7]]
; ZVL128-NEXT: br i1 [[OP_RDX]], label [[TMP9:%.*]], label [[TMP8:%.*]]
; ZVL128-NEXT: ret void
; ZVL128-NEXT: ret void
; ZVL256-LABEL: @reduce_or_2(
; ZVL256-NEXT: [[TMP1:%.*]] = shl i64 0, 0
; ZVL256-NEXT: [[TMP2:%.*]] = insertelement <16 x i64> <i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 poison>, i64 [[TMP1]], i32 15
; ZVL256-NEXT: [[TMP3:%.*]] = icmp ult <16 x i64> [[TMP2]], zeroinitializer
; ZVL256-NEXT: [[TMP4:%.*]] = insertelement <16 x i64> <i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 poison, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0>, i64 [[TMP1]], i32 6
; ZVL256-NEXT: [[TMP5:%.*]] = icmp ult <16 x i64> [[TMP4]], zeroinitializer
; ZVL256-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP3]])
; ZVL256-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP5]])
; ZVL256-NEXT: [[OP_RDX:%.*]] = or i1 [[TMP6]], [[TMP7]]
; ZVL256-NEXT: br i1 [[OP_RDX]], label [[TMP9:%.*]], label [[TMP8:%.*]]
; ZVL256-NEXT: ret void
; ZVL256-NEXT: ret void
; ZVL512-LABEL: @reduce_or_2(
; ZVL512-NEXT: [[TMP1:%.*]] = shl i64 0, 0
; ZVL512-NEXT: [[TMP2:%.*]] = insertelement <32 x i64> <i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 poison, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 poison, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0>, i64 [[TMP1]], i32 15
; ZVL512-NEXT: [[TMP3:%.*]] = shufflevector <32 x i64> [[TMP2]], <32 x i64> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 15, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; ZVL512-NEXT: [[TMP4:%.*]] = icmp ult <32 x i64> [[TMP3]], zeroinitializer
; ZVL512-NEXT: [[TMP5:%.*]] = call i1 @llvm.vector.reduce.or.v32i1(<32 x i1> [[TMP4]])
; ZVL512-NEXT: br i1 [[TMP5]], label [[TMP7:%.*]], label [[TMP6:%.*]]
; ZVL512-NEXT: ret void
; ZVL512-NEXT: ret void
; CHECK-LABEL: @reduce_xor(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: [[OP_RDX:%.*]] = xor i8 [[TMP3]], 1
; CHECK-NEXT: ret i8 [[OP_RDX]]
; CHECK-LABEL: @reduce_add(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: [[OP_RDX:%.*]] = add i8 [[TMP3]], 1
; CHECK-NEXT: ret i8 [[OP_RDX]]
; CHECK-LABEL: @reduce_smin(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[TMP3]]
; CHECK-LABEL: @reduce_smax(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[TMP3]]
; CHECK-LABEL: @reduce_umax(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[TMP3]]
; CHECK-LABEL: @reduce_umin(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[TMP3]]
; The next batch exercises reductions involving zext of narrower loads
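; A minimal sketch (illustrative only, not the exact test body) of a
; zext-widened reduction input: narrow i8 loads are zero-extended before
; the add chain, and the vectorizer is expected to emit a vector load,
; a vector zext, and @llvm.vector.reduce.add as the checks below show.
define i64 @red_zext_ld_4xi64_sketch(ptr %ptr) {
entry:
  %ld0 = load i8, ptr %ptr, align 1
  %z0 = zext i8 %ld0 to i64
  %gep1 = getelementptr inbounds i8, ptr %ptr, i64 1
  %ld1 = load i8, ptr %gep1, align 1
  %z1 = zext i8 %ld1 to i64
  %gep2 = getelementptr inbounds i8, ptr %ptr, i64 2
  %ld2 = load i8, ptr %gep2, align 1
  %z2 = zext i8 %ld2 to i64
  %gep3 = getelementptr inbounds i8, ptr %ptr, i64 3
  %ld3 = load i8, ptr %gep3, align 1
  %z3 = zext i8 %ld3 to i64
  %add0 = add i64 %z0, %z1
  %add1 = add i64 %add0, %z2
  %add2 = add i64 %add1, %z3
  ret i64 %add2
}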
; CHECK-LABEL: @red_zext_ld_2xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LD0:%.*]] = load i8, ptr [[PTR:%.*]], align 1
; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[LD0]] to i64
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP]], align 1
; CHECK-NEXT: [[ZEXT_1:%.*]] = zext i8 [[LD1]] to i64
; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i64 [[ZEXT]], [[ZEXT_1]]
; CHECK-NEXT: ret i64 [[ADD_1]]
; CHECK-LABEL: @red_zext_ld_4xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[PTR:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i16>
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[TMP1]])
; CHECK-NEXT: [[ADD_3:%.*]] = zext i16 [[TMP2]] to i64
; CHECK-NEXT: ret i64 [[ADD_3]]
; CHECK-LABEL: @red_zext_ld_8xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[PTR:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP1]])
; CHECK-NEXT: ret i64 [[TMP2]]
; CHECK-LABEL: @red_zext_ld_16xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[PTR:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[TMP0]] to <16 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP1]])
; CHECK-NEXT: ret i64 [[TMP2]]
; CHECK-LABEL: @stride_sum_abs_diff(
; CHECK-NEXT: [[P_2:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[STRIDE:%.*]]
; CHECK-NEXT: [[Q_2:%.*]] = getelementptr inbounds i32, ptr [[Q:%.*]], i64 [[STRIDE]]
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[P]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr [[Q]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr [[P_2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr [[Q_2]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> poison, <2 x i32> [[TMP1]], i64 0)
; CHECK-NEXT: [[TMP6:%.*]] = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> [[TMP5]], <2 x i32> [[TMP3]], i64 2)
; CHECK-NEXT: [[TMP7:%.*]] = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> poison, <2 x i32> [[TMP2]], i64 0)
; CHECK-NEXT: [[TMP8:%.*]] = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> [[TMP7]], <2 x i32> [[TMP4]], i64 2)
; CHECK-NEXT: [[TMP9:%.*]] = sub <4 x i32> [[TMP6]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[TMP9]], i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP10]])
; CHECK-NEXT: ret i32 [[TMP11]]
; CHECK-LABEL: @reduce_sum_2arrays_a(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[Q:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.vector.insert.v8i8.v4i8(<8 x i8> poison, <4 x i8> [[TMP0]], i64 0)
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.vector.insert.v8i8.v4i8(<8 x i8> [[TMP2]], <4 x i8> [[TMP1]], i64 4)
; CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[TMP3]] to <8 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
; CHECK-NEXT: ret i32 [[TMP5]]
; CHECK-LABEL: @reduce_sum_2arrays_b(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[X:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[Y:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.vector.insert.v8i8.v4i8(<8 x i8> poison, <4 x i8> [[TMP0]], i64 0)
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.vector.insert.v8i8.v4i8(<8 x i8> [[TMP2]], <4 x i8> [[TMP1]], i64 4)
; CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[TMP3]] to <8 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
; CHECK-NEXT: ret i32 [[TMP5]]
; CHECK-LABEL: @fadd_4xbf16(
; CHECK-NEXT: [[X0:%.*]] = load bfloat, ptr [[P:%.*]], align 2
; CHECK-NEXT: [[P1:%.*]] = getelementptr bfloat, ptr [[P]], i32 1
; CHECK-NEXT: [[X1:%.*]] = load bfloat, ptr [[P1]], align 2
; CHECK-NEXT: [[P2:%.*]] = getelementptr bfloat, ptr [[P]], i32 2
; CHECK-NEXT: [[X2:%.*]] = load bfloat, ptr [[P2]], align 2
; CHECK-NEXT: [[P3:%.*]] = getelementptr bfloat, ptr [[P]], i32 3
; CHECK-NEXT: [[X3:%.*]] = load bfloat, ptr [[P3]], align 2
; CHECK-NEXT: [[R0:%.*]] = fadd fast bfloat [[X0]], [[X1]]
; CHECK-NEXT: [[R1:%.*]] = fadd fast bfloat [[R0]], [[X2]]
; CHECK-NEXT: [[R2:%.*]] = fadd fast bfloat [[R1]], [[X3]]
; CHECK-NEXT: ret bfloat [[R2]]
; CHECK-LABEL: @fmul_4xbf16(
; CHECK-NEXT: [[X0:%.*]] = load bfloat, ptr [[P:%.*]], align 2
; CHECK-NEXT: [[P1:%.*]] = getelementptr bfloat, ptr [[P]], i32 1
; CHECK-NEXT: [[X1:%.*]] = load bfloat, ptr [[P1]], align 2
; CHECK-NEXT: [[P2:%.*]] = getelementptr bfloat, ptr [[P]], i32 2
; CHECK-NEXT: [[X2:%.*]] = load bfloat, ptr [[P2]], align 2
; CHECK-NEXT: [[P3:%.*]] = getelementptr bfloat, ptr [[P]], i32 3
; CHECK-NEXT: [[X3:%.*]] = load bfloat, ptr [[P3]], align 2
; CHECK-NEXT: [[R0:%.*]] = fmul fast bfloat [[X0]], [[X1]]
; CHECK-NEXT: [[R1:%.*]] = fmul fast bfloat [[R0]], [[X2]]
; CHECK-NEXT: [[R2:%.*]] = fmul fast bfloat [[R1]], [[X3]]
; CHECK-NEXT: ret bfloat [[R2]]
; ZVFHMIN-LABEL: @fadd_4xf16(
; ZVFHMIN-NEXT: [[X0:%.*]] = load half, ptr [[P:%.*]], align 2
; ZVFHMIN-NEXT: [[P1:%.*]] = getelementptr half, ptr [[P]], i32 1
; ZVFHMIN-NEXT: [[X1:%.*]] = load half, ptr [[P1]], align 2
; ZVFHMIN-NEXT: [[P2:%.*]] = getelementptr half, ptr [[P]], i32 2
; ZVFHMIN-NEXT: [[X2:%.*]] = load half, ptr [[P2]], align 2
; ZVFHMIN-NEXT: [[P3:%.*]] = getelementptr half, ptr [[P]], i32 3
; ZVFHMIN-NEXT: [[X3:%.*]] = load half, ptr [[P3]], align 2
; ZVFHMIN-NEXT: [[R0:%.*]] = fadd fast half [[X0]], [[X1]]
; ZVFHMIN-NEXT: [[R1:%.*]] = fadd fast half [[R0]], [[X2]]
; ZVFHMIN-NEXT: [[R2:%.*]] = fadd fast half [[R1]], [[X3]]
; ZVFHMIN-NEXT: ret half [[R2]]
; ZVFH-LABEL: @fadd_4xf16(
; ZVFH-NEXT: [[TMP1:%.*]] = load <4 x half>, ptr [[P:%.*]], align 2
; ZVFH-NEXT: [[TMP2:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> [[TMP1]])
; ZVFH-NEXT: ret half [[TMP2]]
; CHECK-LABEL: @fmul_4xf16(
; CHECK-NEXT: [[X0:%.*]] = load half, ptr [[P:%.*]], align 2
; CHECK-NEXT: [[P1:%.*]] = getelementptr half, ptr [[P]], i32 1
; CHECK-NEXT: [[X1:%.*]] = load half, ptr [[P1]], align 2
; CHECK-NEXT: [[P2:%.*]] = getelementptr half, ptr [[P]], i32 2
; CHECK-NEXT: [[X2:%.*]] = load half, ptr [[P2]], align 2
; CHECK-NEXT: [[P3:%.*]] = getelementptr half, ptr [[P]], i32 3
; CHECK-NEXT: [[X3:%.*]] = load half, ptr [[P3]], align 2
; CHECK-NEXT: [[R0:%.*]] = fmul fast half [[X0]], [[X1]]
; CHECK-NEXT: [[R1:%.*]] = fmul fast half [[R0]], [[X2]]
; CHECK-NEXT: [[R2:%.*]] = fmul fast half [[R1]], [[X3]]
; CHECK-NEXT: ret half [[R2]]