; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 \
; RUN:   -mattr=+v,+zvfhmin,+zvfbfmin -riscv-v-slp-max-vf=0 -S \
; RUN:   | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 \
; RUN:   -mattr=+v,+zvfh,+zvfbfmin -riscv-v-slp-max-vf=0 -S \
; RUN:   | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVL128
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 \
; RUN:   -mattr=+v,+zvl256b,+zvfh,+zvfbfmin -riscv-v-slp-max-vf=0 -S \
; RUN:   | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVL256
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 \
; RUN:   -mattr=+v,+zvl512b,+zvfh,+zvfbfmin -riscv-v-slp-max-vf=0 -S \
; RUN:   | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVL512

target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "riscv64"
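
; The RUN lines form a small feature matrix: one zvfhmin configuration and
; three zvfh configurations that differ only in the guaranteed VLEN (+v
; implies zvl128b). The ZVFHMIN/ZVFH prefixes capture output that depends on
; half-precision support; ZVL128/ZVL256/ZVL512 capture output that depends
; on VLEN. -riscv-v-slp-max-vf=0 removes the target's cap on the SLP
; vectorization factor so the wide reductions below can form.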

; First batch of tests: simple reductions of various widths.

define i64 @red_ld_2xi64(ptr %ptr) {
; CHECK-LABEL: @red_ld_2xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LD0:%.*]] = load i64, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 1
; CHECK-NEXT: [[LD1:%.*]] = load i64, ptr [[GEP]], align 8
; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i64 [[LD0]], [[LD1]]
; CHECK-NEXT: ret i64 [[ADD_1]]
;
entry:
  %ld0 = load i64, ptr %ptr
  %gep = getelementptr inbounds i64, ptr %ptr, i64 1
  %ld1 = load i64, ptr %gep
  %add.1 = add nuw nsw i64 %ld0, %ld1
  ret i64 %add.1
}

define i64 @red_ld_4xi64(ptr %ptr) {
; CHECK-LABEL: @red_ld_4xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP0]])
; CHECK-NEXT: ret i64 [[TMP1]]
;
entry:
  %ld0 = load i64, ptr %ptr
  %gep = getelementptr inbounds i64, ptr %ptr, i64 1
  %ld1 = load i64, ptr %gep
  %add.1 = add nuw nsw i64 %ld0, %ld1
  %gep.1 = getelementptr inbounds i64, ptr %ptr, i64 2
  %ld2 = load i64, ptr %gep.1
  %add.2 = add nuw nsw i64 %add.1, %ld2
  %gep.2 = getelementptr inbounds i64, ptr %ptr, i64 3
  %ld3 = load i64, ptr %gep.2
  %add.3 = add nuw nsw i64 %add.2, %ld3
  ret i64 %add.3
}

define i64 @red_ld_8xi64(ptr %ptr) {
; CHECK-LABEL: @red_ld_8xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP0]])
; CHECK-NEXT: ret i64 [[TMP1]]
;
entry:
  %ld0 = load i64, ptr %ptr
  %gep = getelementptr inbounds i64, ptr %ptr, i64 1
  %ld1 = load i64, ptr %gep
  %add.1 = add nuw nsw i64 %ld0, %ld1
  %gep.1 = getelementptr inbounds i64, ptr %ptr, i64 2
  %ld2 = load i64, ptr %gep.1
  %add.2 = add nuw nsw i64 %add.1, %ld2
  %gep.2 = getelementptr inbounds i64, ptr %ptr, i64 3
  %ld3 = load i64, ptr %gep.2
  %add.3 = add nuw nsw i64 %add.2, %ld3
  %gep.3 = getelementptr inbounds i64, ptr %ptr, i64 4
  %ld4 = load i64, ptr %gep.3
  %add.4 = add nuw nsw i64 %add.3, %ld4
  %gep.4 = getelementptr inbounds i64, ptr %ptr, i64 5
  %ld5 = load i64, ptr %gep.4
  %add.5 = add nuw nsw i64 %add.4, %ld5
  %gep.5 = getelementptr inbounds i64, ptr %ptr, i64 6
  %ld6 = load i64, ptr %gep.5
  %add.6 = add nuw nsw i64 %add.5, %ld6
  %gep.6 = getelementptr inbounds i64, ptr %ptr, i64 7
  %ld7 = load i64, ptr %gep.6
  %add.7 = add nuw nsw i64 %add.6, %ld7
  ret i64 %add.7
}

define i64 @red_ld_16xi64(ptr %ptr) {
; CHECK-LABEL: @red_ld_16xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i64>, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP0]])
; CHECK-NEXT: ret i64 [[TMP1]]
;
entry:
  %ld0 = load i64, ptr %ptr
  %gep = getelementptr inbounds i64, ptr %ptr, i64 1
  %ld1 = load i64, ptr %gep
  %add.1 = add nuw nsw i64 %ld0, %ld1
  %gep.1 = getelementptr inbounds i64, ptr %ptr, i64 2
  %ld2 = load i64, ptr %gep.1
  %add.2 = add nuw nsw i64 %add.1, %ld2
  %gep.2 = getelementptr inbounds i64, ptr %ptr, i64 3
  %ld3 = load i64, ptr %gep.2
  %add.3 = add nuw nsw i64 %add.2, %ld3
  %gep.3 = getelementptr inbounds i64, ptr %ptr, i64 4
  %ld4 = load i64, ptr %gep.3
  %add.4 = add nuw nsw i64 %add.3, %ld4
  %gep.4 = getelementptr inbounds i64, ptr %ptr, i64 5
  %ld5 = load i64, ptr %gep.4
  %add.5 = add nuw nsw i64 %add.4, %ld5
  %gep.5 = getelementptr inbounds i64, ptr %ptr, i64 6
  %ld6 = load i64, ptr %gep.5
  %add.6 = add nuw nsw i64 %add.5, %ld6
  %gep.6 = getelementptr inbounds i64, ptr %ptr, i64 7
  %ld7 = load i64, ptr %gep.6
  %add.7 = add nuw nsw i64 %add.6, %ld7
  %gep.7 = getelementptr inbounds i64, ptr %ptr, i64 8
  %ld8 = load i64, ptr %gep.7
  %add.8 = add nuw nsw i64 %add.7, %ld8
  %gep.8 = getelementptr inbounds i64, ptr %ptr, i64 9
  %ld9 = load i64, ptr %gep.8
  %add.9 = add nuw nsw i64 %add.8, %ld9
  %gep.9 = getelementptr inbounds i64, ptr %ptr, i64 10
  %ld10 = load i64, ptr %gep.9
  %add.10 = add nuw nsw i64 %add.9, %ld10
  %gep.10 = getelementptr inbounds i64, ptr %ptr, i64 11
  %ld11 = load i64, ptr %gep.10
  %add.11 = add nuw nsw i64 %add.10, %ld11
  %gep.11 = getelementptr inbounds i64, ptr %ptr, i64 12
  %ld12 = load i64, ptr %gep.11
  %add.12 = add nuw nsw i64 %add.11, %ld12
  %gep.12 = getelementptr inbounds i64, ptr %ptr, i64 13
  %ld13 = load i64, ptr %gep.12
  %add.13 = add nuw nsw i64 %add.12, %ld13
  %gep.13 = getelementptr inbounds i64, ptr %ptr, i64 14
  %ld14 = load i64, ptr %gep.13
  %add.14 = add nuw nsw i64 %add.13, %ld14
  %gep.14 = getelementptr inbounds i64, ptr %ptr, i64 15
  %ld15 = load i64, ptr %gep.14
  %add.15 = add nuw nsw i64 %add.14, %ld15
  ret i64 %add.15
}
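
; Same 16-element sum, but with the elements two i64s apart. The checks
; below show the scalar loads folding into a single strided VP load with a
; byte stride of 16 rather than a gather.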

define i64 @red_strided_ld_16xi64(ptr %ptr) {
; CHECK-LABEL: @red_strided_ld_16xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call <16 x i64> @llvm.experimental.vp.strided.load.v16i64.p0.i64(ptr align 8 [[PTR:%.*]], i64 16, <16 x i1> splat (i1 true), i32 16)
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP0]])
; CHECK-NEXT: ret i64 [[TMP1]]
;
entry:
  %ld0 = load i64, ptr %ptr
  %gep = getelementptr inbounds i64, ptr %ptr, i64 2
  %ld1 = load i64, ptr %gep
  %add.1 = add nuw nsw i64 %ld0, %ld1
  %gep.1 = getelementptr inbounds i64, ptr %ptr, i64 4
  %ld2 = load i64, ptr %gep.1
  %add.2 = add nuw nsw i64 %add.1, %ld2
  %gep.2 = getelementptr inbounds i64, ptr %ptr, i64 6
  %ld3 = load i64, ptr %gep.2
  %add.3 = add nuw nsw i64 %add.2, %ld3
  %gep.3 = getelementptr inbounds i64, ptr %ptr, i64 8
  %ld4 = load i64, ptr %gep.3
  %add.4 = add nuw nsw i64 %add.3, %ld4
  %gep.4 = getelementptr inbounds i64, ptr %ptr, i64 10
  %ld5 = load i64, ptr %gep.4
  %add.5 = add nuw nsw i64 %add.4, %ld5
  %gep.5 = getelementptr inbounds i64, ptr %ptr, i64 12
  %ld6 = load i64, ptr %gep.5
  %add.6 = add nuw nsw i64 %add.5, %ld6
  %gep.6 = getelementptr inbounds i64, ptr %ptr, i64 14
  %ld7 = load i64, ptr %gep.6
  %add.7 = add nuw nsw i64 %add.6, %ld7
  %gep.7 = getelementptr inbounds i64, ptr %ptr, i64 16
  %ld8 = load i64, ptr %gep.7
  %add.8 = add nuw nsw i64 %add.7, %ld8
  %gep.8 = getelementptr inbounds i64, ptr %ptr, i64 18
  %ld9 = load i64, ptr %gep.8
  %add.9 = add nuw nsw i64 %add.8, %ld9
  %gep.9 = getelementptr inbounds i64, ptr %ptr, i64 20
  %ld10 = load i64, ptr %gep.9
  %add.10 = add nuw nsw i64 %add.9, %ld10
  %gep.10 = getelementptr inbounds i64, ptr %ptr, i64 22
  %ld11 = load i64, ptr %gep.10
  %add.11 = add nuw nsw i64 %add.10, %ld11
  %gep.11 = getelementptr inbounds i64, ptr %ptr, i64 24
  %ld12 = load i64, ptr %gep.11
  %add.12 = add nuw nsw i64 %add.11, %ld12
  %gep.12 = getelementptr inbounds i64, ptr %ptr, i64 26
  %ld13 = load i64, ptr %gep.12
  %add.13 = add nuw nsw i64 %add.12, %ld13
  %gep.13 = getelementptr inbounds i64, ptr %ptr, i64 28
  %ld14 = load i64, ptr %gep.13
  %add.14 = add nuw nsw i64 %add.13, %ld14
  %gep.14 = getelementptr inbounds i64, ptr %ptr, i64 30
  %ld15 = load i64, ptr %gep.14
  %add.15 = add nuw nsw i64 %add.14, %ld15
  ret i64 %add.15
}

; Next batch tests different reduction kinds.

%struct.buf = type { [8 x i8] }

define i8 @reduce_and(ptr %a, ptr %b) {
; CHECK-LABEL: @reduce_and(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: [[OP_RDX:%.*]] = and i8 [[TMP3]], 1
; CHECK-NEXT: ret i8 [[OP_RDX]]
;
entry:
  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
  %0 = load i8, ptr %arrayidx, align 1
  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
  %1 = load i8, ptr %arrayidx3, align 1
  %xor12 = xor i8 %1, %0
  %and13 = and i8 %xor12, 1
  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
  %2 = load i8, ptr %arrayidx.1, align 1
  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
  %3 = load i8, ptr %arrayidx3.1, align 1
  %xor12.1 = xor i8 %3, %2
  %and13.1 = and i8 %xor12.1, %and13
  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
  %4 = load i8, ptr %arrayidx.2, align 1
  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
  %5 = load i8, ptr %arrayidx3.2, align 1
  %xor12.2 = xor i8 %5, %4
  %and13.2 = and i8 %xor12.2, %and13.1
  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
  %6 = load i8, ptr %arrayidx.3, align 1
  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
  %7 = load i8, ptr %arrayidx3.3, align 1
  %xor12.3 = xor i8 %7, %6
  %and13.3 = and i8 %xor12.3, %and13.2
  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
  %8 = load i8, ptr %arrayidx.4, align 1
  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
  %9 = load i8, ptr %arrayidx3.4, align 1
  %xor12.4 = xor i8 %9, %8
  %and13.4 = and i8 %xor12.4, %and13.3
  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
  %10 = load i8, ptr %arrayidx.5, align 1
  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
  %11 = load i8, ptr %arrayidx3.5, align 1
  %xor12.5 = xor i8 %11, %10
  %and13.5 = and i8 %xor12.5, %and13.4
  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
  %12 = load i8, ptr %arrayidx.6, align 1
  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
  %13 = load i8, ptr %arrayidx3.6, align 1
  %xor12.6 = xor i8 %13, %12
  %and13.6 = and i8 %xor12.6, %and13.5
  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
  %14 = load i8, ptr %arrayidx.7, align 1
  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
  %15 = load i8, ptr %arrayidx3.7, align 1
  %xor12.7 = xor i8 %15, %14
  %and13.7 = and i8 %xor12.7, %and13.6
  ret i8 %and13.7
}

define i8 @reduce_or_1(ptr %a, ptr %b) {
; CHECK-LABEL: @reduce_or_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[TMP3]]
;
entry:
  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
  %0 = load i8, ptr %arrayidx, align 1
  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
  %1 = load i8, ptr %arrayidx3, align 1
  %xor12 = xor i8 %1, %0
  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
  %2 = load i8, ptr %arrayidx.1, align 1
  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
  %3 = load i8, ptr %arrayidx3.1, align 1
  %xor12.1 = xor i8 %3, %2
  %or13.1 = or i8 %xor12.1, %xor12
  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
  %4 = load i8, ptr %arrayidx.2, align 1
  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
  %5 = load i8, ptr %arrayidx3.2, align 1
  %xor12.2 = xor i8 %5, %4
  %or13.2 = or i8 %xor12.2, %or13.1
  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
  %6 = load i8, ptr %arrayidx.3, align 1
  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
  %7 = load i8, ptr %arrayidx3.3, align 1
  %xor12.3 = xor i8 %7, %6
  %or13.3 = or i8 %xor12.3, %or13.2
  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
  %8 = load i8, ptr %arrayidx.4, align 1
  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
  %9 = load i8, ptr %arrayidx3.4, align 1
  %xor12.4 = xor i8 %9, %8
  %or13.4 = or i8 %xor12.4, %or13.3
  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
  %10 = load i8, ptr %arrayidx.5, align 1
  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
  %11 = load i8, ptr %arrayidx3.5, align 1
  %xor12.5 = xor i8 %11, %10
  %or13.5 = or i8 %xor12.5, %or13.4
  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
  %12 = load i8, ptr %arrayidx.6, align 1
  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
  %13 = load i8, ptr %arrayidx3.6, align 1
  %xor12.6 = xor i8 %13, %12
  %or13.6 = or i8 %xor12.6, %or13.5
  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
  %14 = load i8, ptr %arrayidx.7, align 1
  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
  %15 = load i8, ptr %arrayidx3.7, align 1
  %xor12.7 = xor i8 %15, %14
  %or13.7 = or i8 %xor12.7, %or13.6
  ret i8 %or13.7
}
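
; reduce_or_2 feeds 32 compares into one i1 or-reduction. With the default
; VLEN the SLP vectorizer splits it into two 16-wide reductions; the zvl512b
; run keeps a single 32-wide reduction (see the ZVL512 checks below).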

define void @reduce_or_2() {
; ZVFHMIN-LABEL: @reduce_or_2(
; ZVFHMIN-NEXT: [[TMP1:%.*]] = shl i64 0, 0
; ZVFHMIN-NEXT: [[TMP2:%.*]] = insertelement <16 x i64> , i64 [[TMP1]], i32 15
; ZVFHMIN-NEXT: [[TMP3:%.*]] = icmp ult <16 x i64> [[TMP2]], zeroinitializer
; ZVFHMIN-NEXT: [[TMP4:%.*]] = insertelement <16 x i64> , i64 [[TMP1]], i32 6
; ZVFHMIN-NEXT: [[TMP5:%.*]] = icmp ult <16 x i64> [[TMP4]], zeroinitializer
; ZVFHMIN-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP3]])
; ZVFHMIN-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP5]])
; ZVFHMIN-NEXT: [[OP_RDX:%.*]] = or i1 [[TMP6]], [[TMP7]]
; ZVFHMIN-NEXT: br i1 [[OP_RDX]], label [[TMP9:%.*]], label [[TMP8:%.*]]
; ZVFHMIN: 8:
; ZVFHMIN-NEXT: ret void
; ZVFHMIN: 9:
; ZVFHMIN-NEXT: ret void
;
; ZVL128-LABEL: @reduce_or_2(
; ZVL128-NEXT: [[TMP1:%.*]] = shl i64 0, 0
; ZVL128-NEXT: [[TMP2:%.*]] = insertelement <16 x i64> , i64 [[TMP1]], i32 15
; ZVL128-NEXT: [[TMP3:%.*]] = icmp ult <16 x i64> [[TMP2]], zeroinitializer
; ZVL128-NEXT: [[TMP4:%.*]] = insertelement <16 x i64> , i64 [[TMP1]], i32 6
; ZVL128-NEXT: [[TMP5:%.*]] = icmp ult <16 x i64> [[TMP4]], zeroinitializer
; ZVL128-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP3]])
; ZVL128-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP5]])
; ZVL128-NEXT: [[OP_RDX:%.*]] = or i1 [[TMP6]], [[TMP7]]
; ZVL128-NEXT: br i1 [[OP_RDX]], label [[TMP9:%.*]], label [[TMP8:%.*]]
; ZVL128: 8:
; ZVL128-NEXT: ret void
; ZVL128: 9:
; ZVL128-NEXT: ret void
;
; ZVL256-LABEL: @reduce_or_2(
; ZVL256-NEXT: [[TMP1:%.*]] = shl i64 0, 0
; ZVL256-NEXT: [[TMP2:%.*]] = insertelement <16 x i64> , i64 [[TMP1]], i32 15
; ZVL256-NEXT: [[TMP3:%.*]] = icmp ult <16 x i64> [[TMP2]], zeroinitializer
; ZVL256-NEXT: [[TMP4:%.*]] = insertelement <16 x i64> , i64 [[TMP1]], i32 6
; ZVL256-NEXT: [[TMP5:%.*]] = icmp ult <16 x i64> [[TMP4]], zeroinitializer
; ZVL256-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP3]])
; ZVL256-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP5]])
; ZVL256-NEXT: [[OP_RDX:%.*]] = or i1 [[TMP6]], [[TMP7]]
; ZVL256-NEXT: br i1 [[OP_RDX]], label [[TMP9:%.*]], label [[TMP8:%.*]]
; ZVL256: 8:
; ZVL256-NEXT: ret void
; ZVL256: 9:
; ZVL256-NEXT: ret void
;
; ZVL512-LABEL: @reduce_or_2(
; ZVL512-NEXT: [[TMP1:%.*]] = shl i64 0, 0
; ZVL512-NEXT: [[TMP2:%.*]] = insertelement <32 x i64> , i64 [[TMP1]], i32 15
; ZVL512-NEXT: [[TMP3:%.*]] = shufflevector <32 x i64> [[TMP2]], <32 x i64> poison, <32 x i32>
; ZVL512-NEXT: [[TMP4:%.*]] = icmp ult <32 x i64> [[TMP3]], zeroinitializer
; ZVL512-NEXT: [[TMP5:%.*]] = call i1 @llvm.vector.reduce.or.v32i1(<32 x i1> [[TMP4]])
; ZVL512-NEXT: br i1 [[TMP5]], label [[TMP7:%.*]], label [[TMP6:%.*]]
; ZVL512: 6:
; ZVL512-NEXT: ret void
; ZVL512: 7:
; ZVL512-NEXT: ret void
;
  %1 = shl i64 0, 0
  %2 = icmp ult i64 0, 0
  %3 = icmp ult i64 0, 0
  %4 = or i1 %2, %3
  %5 = icmp ult i64 0, 0
  %6 = or i1 %4, %5
  %7 = icmp ult i64 0, 0
  %8 = or i1 %6, %7
  %9 = icmp ult i64 0, 0
  %10 = or i1 %8, %9
  %11 = icmp ult i64 0, 0
  %12 = or i1 %10, %11
  %13 = icmp ult i64 0, 0
  %14 = or i1 %12, %13
  %15 = icmp ult i64 0, 0
  %16 = or i1 %14, %15
  %17 = icmp ult i64 0, 0
  %18 = or i1 %16, %17
  %19 = icmp ult i64 0, 0
  %20 = or i1 %18, %19
  %21 = icmp ult i64 0, 0
  %22 = or i1 %20, %21
  %23 = icmp ult i64 0, 0
  %24 = or i1 %22, %23
  %25 = icmp ult i64 0, 0
  %26 = or i1 %24, %25
  %27 = icmp ult i64 0, 0
  %28 = or i1 %26, %27
  %29 = icmp ult i64 0, 0
  %30 = or i1 %28, %29
  %31 = icmp ult i64 %1, 0
  %32 = or i1 %30, %31
  %33 = icmp ult i64 0, 0
  %34 = or i1 %32, %33
  %35 = icmp ult i64 0, 0
  %36 = or i1 %34, %35
  %37 = icmp ult i64 0, 0
  %38 = or i1 %36, %37
  %39 = icmp ult i64 0, 0
  %40 = or i1 %38, %39
  %41 = icmp ult i64 0, 0
  %42 = or i1 %40, %41
  %43 = icmp ult i64 0, 0
  %44 = or i1 %42, %43
  %45 = icmp ult i64 %1, 0
  %46 = or i1 %44, %45
  %47 = icmp ult i64 0, 0
  %48 = or i1 %46, %47
  %49 = icmp ult i64 0, 0
  %50 = or i1 %48, %49
  %51 = icmp ult i64 0, 0
  %52 = or i1 %50, %51
  %53 = icmp ult i64 0, 0
  %54 = or i1 %52, %53
  %55 = icmp ult i64 0, 0
  %56 = or i1 %54, %55
  %57 = icmp ult i64 0, 0
  %58 = or i1 %56, %57
  %59 = icmp ult i64 0, 0
  %60 = or i1 %58, %59
  %61 = icmp ult i64 0, 0
  %62 = or i1 %60, %61
  %63 = icmp ult i64 0, 0
  %64 = or i1 %62, %63
  br i1 %64, label %66, label %65

65:                                               ; preds = %0
  ret void

66:                                               ; preds = %0
  ret void
}

define i8 @reduce_xor(ptr %a, ptr %b) {
; CHECK-LABEL: @reduce_xor(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: [[OP_RDX:%.*]] = xor i8 [[TMP3]], 1
; CHECK-NEXT: ret i8 [[OP_RDX]]
;
entry:
  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
  %0 = load i8, ptr %arrayidx, align 1
  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
  %1 = load i8, ptr %arrayidx3, align 1
  %and12 = and i8 %1, %0
  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
  %2 = load i8, ptr %arrayidx.1, align 1
  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
  %3 = load i8, ptr %arrayidx3.1, align 1
  %and12.1 = and i8 %3, %2
  %4 = xor i8 %and12, %and12.1
  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
  %5 = load i8, ptr %arrayidx.2, align 1
  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
  %6 = load i8, ptr %arrayidx3.2, align 1
  %and12.2 = and i8 %6, %5
  %7 = xor i8 %4, %and12.2
  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
  %8 = load i8, ptr %arrayidx.3, align 1
  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
  %9 = load i8, ptr %arrayidx3.3, align 1
  %and12.3 = and i8 %9, %8
  %10 = xor i8 %7, %and12.3
  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
  %11 = load i8, ptr %arrayidx.4, align 1
  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
  %12 = load i8, ptr %arrayidx3.4, align 1
  %and12.4 = and i8 %12, %11
  %13 = xor i8 %10, %and12.4
  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
  %14 = load i8, ptr %arrayidx.5, align 1
  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
  %15 = load i8, ptr %arrayidx3.5, align 1
  %and12.5 = and i8 %15, %14
  %16 = xor i8 %13, %and12.5
  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
  %17 = load i8, ptr %arrayidx.6, align 1
  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
  %18 = load i8, ptr %arrayidx3.6, align 1
  %and12.6 = and i8 %18, %17
  %19 = xor i8 %16, %and12.6
  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
  %20 = load i8, ptr %arrayidx.7, align 1
  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
  %21 = load i8, ptr %arrayidx3.7, align 1
  %and12.7 = and i8 %21, %20
  %22 = xor i8 %19, %and12.7
  %xor13.7 = xor i8 %22, 1
  ret i8 %xor13.7
}

define i8 @reduce_add(ptr %a, ptr %b) {
; CHECK-LABEL: @reduce_add(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: [[OP_RDX:%.*]] = add i8 [[TMP3]], 1
; CHECK-NEXT: ret i8 [[OP_RDX]]
;
entry:
  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
  %0 = load i8, ptr %arrayidx, align 1
  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
  %1 = load i8, ptr %arrayidx3, align 1
  %and12 = and i8 %1, %0
  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
  %2 = load i8, ptr %arrayidx.1, align 1
  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
  %3 = load i8, ptr %arrayidx3.1, align 1
  %and12.1 = and i8 %3, %2
  %4 = add i8 %and12, %and12.1
  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
  %5 = load i8, ptr %arrayidx.2, align 1
  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
  %6 = load i8, ptr %arrayidx3.2, align 1
  %and12.2 = and i8 %6, %5
  %7 = add i8 %4, %and12.2
  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
  %8 = load i8, ptr %arrayidx.3, align 1
  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
  %9 = load i8, ptr %arrayidx3.3, align 1
  %and12.3 = and i8 %9, %8
  %10 = add i8 %7, %and12.3
  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
  %11 = load i8, ptr %arrayidx.4, align 1
  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
  %12 = load i8, ptr %arrayidx3.4, align 1
  %and12.4 = and i8 %12, %11
  %13 = add i8 %10, %and12.4
  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
  %14 = load i8, ptr %arrayidx.5, align 1
  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
  %15 = load i8, ptr %arrayidx3.5, align 1
  %and12.5 = and i8 %15, %14
  %16 = add i8 %13, %and12.5
  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
  %17 = load i8, ptr %arrayidx.6, align 1
  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
  %18 = load i8, ptr %arrayidx3.6, align 1
  %and12.6 = and i8 %18, %17
  %19 = add i8 %16, %and12.6
  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
  %20 = load i8, ptr %arrayidx.7, align 1
  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
  %21 = load i8, ptr %arrayidx3.7, align 1
  %and12.7 = and i8 %21, %20
  %22 = add i8 %19, %and12.7
  %add13.7 = add i8 %22, 1
  ret i8 %add13.7
}

declare i8 @llvm.smin.i8(i8, i8)

define i8 @reduce_smin(ptr %a, ptr %b) {
; CHECK-LABEL: @reduce_smin(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[TMP3]]
;
entry:
  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
  %0 = load i8, ptr %arrayidx, align 1
  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
  %1 = load i8, ptr %arrayidx3, align 1
  %and12 = and i8 %1, %0
  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
  %2 = load i8, ptr %arrayidx.1, align 1
  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
  %3 = load i8, ptr %arrayidx3.1, align 1
  %and12.1 = and i8 %3, %2
  %4 = tail call i8 @llvm.smin.i8(i8 %and12, i8 %and12.1)
  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
  %5 = load i8, ptr %arrayidx.2, align 1
  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
  %6 = load i8, ptr %arrayidx3.2, align 1
  %and12.2 = and i8 %6, %5
  %7 = tail call i8 @llvm.smin.i8(i8 %4, i8 %and12.2)
  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
  %8 = load i8, ptr %arrayidx.3, align 1
  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
  %9 = load i8, ptr %arrayidx3.3, align 1
  %and12.3 = and i8 %9, %8
  %10 = tail call i8 @llvm.smin.i8(i8 %7, i8 %and12.3)
  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
  %11 = load i8, ptr %arrayidx.4, align 1
  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
  %12 = load i8, ptr %arrayidx3.4, align 1
  %and12.4 = and i8 %12, %11
  %13 = tail call i8 @llvm.smin.i8(i8 %10, i8 %and12.4)
  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
  %14 = load i8, ptr %arrayidx.5, align 1
  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
  %15 = load i8, ptr %arrayidx3.5, align 1
  %and12.5 = and i8 %15, %14
  %16 = tail call i8 @llvm.smin.i8(i8 %13, i8 %and12.5)
  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
  %17 = load i8, ptr %arrayidx.6, align 1
  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
  %18 = load i8, ptr %arrayidx3.6, align 1
  %and12.6 = and i8 %18, %17
  %19 = tail call i8 @llvm.smin.i8(i8 %16, i8 %and12.6)
  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
  %20 = load i8, ptr %arrayidx.7, align 1
  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
  %21 = load i8, ptr %arrayidx3.7, align 1
  %and12.7 = and i8 %21, %20
  %22 = tail call i8 @llvm.smin.i8(i8 %19, i8 %and12.7)
  ret i8 %22
}

declare i8 @llvm.smax.i8(i8, i8)

define i8 @reduce_smax(ptr %a, ptr %b) {
; CHECK-LABEL: @reduce_smax(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[TMP3]]
;
entry:
  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
  %0 = load i8, ptr %arrayidx, align 1
  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
  %1 = load i8, ptr %arrayidx3, align 1
  %and12 = and i8 %1, %0
  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
  %2 = load i8, ptr %arrayidx.1, align 1
  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
  %3 = load i8, ptr %arrayidx3.1, align 1
  %and12.1 = and i8 %3, %2
  %4 = tail call i8 @llvm.smax.i8(i8 %and12, i8 %and12.1)
  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
  %5 = load i8, ptr %arrayidx.2, align 1
  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
  %6 = load i8, ptr %arrayidx3.2, align 1
  %and12.2 = and i8 %6, %5
  %7 = tail call i8 @llvm.smax.i8(i8 %4, i8 %and12.2)
  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
  %8 = load i8, ptr %arrayidx.3, align 1
  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
  %9 = load i8, ptr %arrayidx3.3, align 1
  %and12.3 = and i8 %9, %8
  %10 = tail call i8 @llvm.smax.i8(i8 %7, i8 %and12.3)
  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
  %11 = load i8, ptr %arrayidx.4, align 1
  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
  %12 = load i8, ptr %arrayidx3.4, align 1
  %and12.4 = and i8 %12, %11
  %13 = tail call i8 @llvm.smax.i8(i8 %10, i8 %and12.4)
  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
  %14 = load i8, ptr %arrayidx.5, align 1
  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
  %15 = load i8, ptr %arrayidx3.5, align 1
  %and12.5 = and i8 %15, %14
  %16 = tail call i8 @llvm.smax.i8(i8 %13, i8 %and12.5)
  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
  %17 = load i8, ptr %arrayidx.6, align 1
  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
  %18 = load i8, ptr %arrayidx3.6, align 1
  %and12.6 = and i8 %18, %17
  %19 = tail call i8 @llvm.smax.i8(i8 %16, i8 %and12.6)
  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
  %20 = load i8, ptr %arrayidx.7, align 1
  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
  %21 = load i8, ptr %arrayidx3.7, align 1
  %and12.7 = and i8 %21, %20
  %22 = tail call i8 @llvm.smax.i8(i8 %19, i8 %and12.7)
  ret i8 %22
}

declare i8 @llvm.umax.i8(i8, i8)

define i8 @reduce_umax(ptr %a, ptr %b) {
; CHECK-LABEL: @reduce_umax(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[TMP3]]
;
entry:
  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
  %0 = load i8, ptr %arrayidx, align 1
  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
  %1 = load i8, ptr %arrayidx3, align 1
  %and12 = and i8 %1, %0
  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
  %2 = load i8, ptr %arrayidx.1, align 1
  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
  %3 = load i8, ptr %arrayidx3.1, align 1
  %and12.1 = and i8 %3, %2
  %4 = tail call i8 @llvm.umax.i8(i8 %and12, i8 %and12.1)
  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
  %5 = load i8, ptr %arrayidx.2, align 1
  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
  %6 = load i8, ptr %arrayidx3.2, align 1
  %and12.2 = and i8 %6, %5
  %7 = tail call i8 @llvm.umax.i8(i8 %4, i8 %and12.2)
  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
  %8 = load i8, ptr %arrayidx.3, align 1
  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
  %9 = load i8, ptr %arrayidx3.3, align 1
  %and12.3 = and i8 %9, %8
  %10 = tail call i8 @llvm.umax.i8(i8 %7, i8 %and12.3)
  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
  %11 = load i8, ptr %arrayidx.4, align 1
  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
  %12 = load i8, ptr %arrayidx3.4, align 1
  %and12.4 = and i8 %12, %11
  %13 = tail call i8 @llvm.umax.i8(i8 %10, i8 %and12.4)
  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
  %14 = load i8, ptr %arrayidx.5, align 1
  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
  %15 = load i8, ptr %arrayidx3.5, align 1
  %and12.5 = and i8 %15, %14
  %16 = tail call i8 @llvm.umax.i8(i8 %13, i8 %and12.5)
  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
  %17 = load i8, ptr %arrayidx.6, align 1
  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
  %18 = load i8, ptr %arrayidx3.6, align 1
  %and12.6 = and i8 %18, %17
  %19 = tail call i8 @llvm.umax.i8(i8 %16, i8 %and12.6)
  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
  %20 = load i8, ptr %arrayidx.7, align 1
  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
  %21 = load i8, ptr %arrayidx3.7, align 1
  %and12.7 = and i8 %21, %20
  %22 = tail call i8 @llvm.umax.i8(i8 %19, i8 %and12.7)
  ret i8 %22
}

declare i8 @llvm.umin.i8(i8, i8)

define i8 @reduce_umin(ptr %a, ptr %b) {
; CHECK-LABEL: @reduce_umin(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[TMP3]]
;
entry:
  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
  %0 = load i8, ptr %arrayidx, align 1
  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
  %1 = load i8, ptr %arrayidx3, align 1
  %and12 = and i8 %1, %0
  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
  %2 = load i8, ptr %arrayidx.1, align 1
  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
  %3 = load i8, ptr %arrayidx3.1, align 1
  %and12.1 = and i8 %3, %2
  %4 = tail call i8 @llvm.umin.i8(i8 %and12, i8 %and12.1)
  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
  %5 = load i8, ptr %arrayidx.2, align 1
  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
  %6 = load i8, ptr %arrayidx3.2, align 1
  %and12.2 = and i8 %6, %5
  %7 = tail call i8 @llvm.umin.i8(i8 %4, i8 %and12.2)
  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
  %8 = load i8, ptr %arrayidx.3, align 1
  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
  %9 = load i8, ptr %arrayidx3.3, align 1
  %and12.3 = and i8 %9, %8
  %10 = tail call i8 @llvm.umin.i8(i8 %7, i8 %and12.3)
  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
  %11 = load i8, ptr %arrayidx.4, align 1
  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
  %12 = load i8, ptr %arrayidx3.4, align 1
  %and12.4 = and i8 %12, %11
  %13 = tail call i8 @llvm.umin.i8(i8 %10, i8 %and12.4)
  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
  %14 = load i8, ptr %arrayidx.5, align 1
  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
  %15 = load i8, ptr %arrayidx3.5, align 1
  %and12.5 = and i8 %15, %14
  %16 = tail call i8 @llvm.umin.i8(i8 %13, i8 %and12.5)
  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
  %17 = load i8, ptr %arrayidx.6, align 1
  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
  %18 = load i8, ptr %arrayidx3.6, align 1
  %and12.6 = and i8 %18, %17
  %19 = tail call i8 @llvm.umin.i8(i8 %16, i8 %and12.6)
  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
  %20 = load i8, ptr %arrayidx.7, align 1
  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
  %21 = load i8, ptr %arrayidx3.7, align 1
  %and12.7 = and i8 %21, %20
  %22 = tail call i8 @llvm.umin.i8(i8 %19, i8 %and12.7)
  ret i8 %22
}

; Next batch exercises reductions involving zext of narrower loads.

define i64 @red_zext_ld_2xi64(ptr %ptr) {
; CHECK-LABEL: @red_zext_ld_2xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LD0:%.*]] = load i8, ptr [[PTR:%.*]], align 1
; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[LD0]] to i64
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP]], align 1
; CHECK-NEXT: [[ZEXT_1:%.*]] = zext i8 [[LD1]] to i64
; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i64 [[ZEXT]], [[ZEXT_1]]
; CHECK-NEXT: ret i64 [[ADD_1]]
;
entry:
  %ld0 = load i8, ptr %ptr
  %zext = zext i8 %ld0 to i64
  %gep = getelementptr inbounds i8, ptr %ptr, i64 1
  %ld1 = load i8, ptr %gep
  %zext.1 = zext i8 %ld1 to i64
  %add.1 = add nuw nsw i64 %zext, %zext.1
  ret i64 %add.1
}

define i64 @red_zext_ld_4xi64(ptr %ptr) {
; CHECK-LABEL: @red_zext_ld_4xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[PTR:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i16>
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[TMP1]])
; CHECK-NEXT: [[ADD_3:%.*]] = zext i16 [[TMP2]] to i64
; CHECK-NEXT: ret i64 [[ADD_3]]
;
entry:
  %ld0 = load i8, ptr %ptr
  %zext = zext i8 %ld0 to i64
  %gep = getelementptr inbounds i8, ptr %ptr, i64 1
  %ld1 = load i8, ptr %gep
  %zext.1 = zext i8 %ld1 to i64
  %add.1 = add nuw nsw i64 %zext, %zext.1
  %gep.1 = getelementptr inbounds i8, ptr %ptr, i64 2
  %ld2 = load i8, ptr %gep.1
  %zext.2 = zext i8 %ld2 to i64
  %add.2 = add nuw nsw i64 %add.1, %zext.2
  %gep.2 = getelementptr inbounds i8, ptr %ptr, i64 3
  %ld3 = load i8, ptr %gep.2
  %zext.3 = zext i8 %ld3 to i64
  %add.3 = add nuw nsw i64 %add.2, %zext.3
  ret i64 %add.3
}
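
; Note the narrowed accumulator in red_zext_ld_4xi64 above: the i8 elements
; are only extended to i16 for the reduction, and just the final sum is
; widened to i64.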

define i64 @red_zext_ld_8xi64(ptr %ptr) {
; CHECK-LABEL: @red_zext_ld_8xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[PTR:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP1]])
; CHECK-NEXT: ret i64 [[TMP2]]
;
entry:
  %ld0 = load i8, ptr %ptr
  %zext = zext i8 %ld0 to i64
  %gep = getelementptr inbounds i8, ptr %ptr, i64 1
  %ld1 = load i8, ptr %gep
  %zext.1 = zext i8 %ld1 to i64
  %add.1 = add nuw nsw i64 %zext, %zext.1
  %gep.1 = getelementptr inbounds i8, ptr %ptr, i64 2
  %ld2 = load i8, ptr %gep.1
  %zext.2 = zext i8 %ld2 to i64
  %add.2 = add nuw nsw i64 %add.1, %zext.2
  %gep.2 = getelementptr inbounds i8, ptr %ptr, i64 3
  %ld3 = load i8, ptr %gep.2
  %zext.3 = zext i8 %ld3 to i64
  %add.3 = add nuw nsw i64 %add.2, %zext.3
  %gep.3 = getelementptr inbounds i8, ptr %ptr, i64 4
  %ld4 = load i8, ptr %gep.3
  %zext.4 = zext i8 %ld4 to i64
  %add.4 = add nuw nsw i64 %add.3, %zext.4
  %gep.4 = getelementptr inbounds i8, ptr %ptr, i64 5
  %ld5 = load i8, ptr %gep.4
  %zext.5 = zext i8 %ld5 to i64
  %add.5 = add nuw nsw i64 %add.4, %zext.5
  %gep.5 = getelementptr inbounds i8, ptr %ptr, i64 6
  %ld6 = load i8, ptr %gep.5
  %zext.6 = zext i8 %ld6 to i64
  %add.6 = add nuw nsw i64 %add.5, %zext.6
  %gep.6 = getelementptr inbounds i8, ptr %ptr, i64 7
  %ld7 = load i8, ptr %gep.6
  %zext.7 = zext i8 %ld7 to i64
  %add.7 = add nuw nsw i64 %add.6, %zext.7
  ret i64 %add.7
}

define i64 @red_zext_ld_16xi64(ptr %ptr) {
; CHECK-LABEL: @red_zext_ld_16xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[PTR:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[TMP0]] to <16 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP1]])
; CHECK-NEXT: ret i64 [[TMP2]]
;
entry:
  %ld0 = load i8, ptr %ptr
  %zext = zext i8 %ld0 to i64
  %gep = getelementptr inbounds i8, ptr %ptr, i64 1
  %ld1 = load i8, ptr %gep
  %zext.1 = zext i8 %ld1 to i64
  %add.1 = add nuw nsw i64 %zext, %zext.1
  %gep.1 = getelementptr inbounds i8, ptr %ptr, i64 2
  %ld2 = load i8, ptr %gep.1
  %zext.2 = zext i8 %ld2 to i64
  %add.2 = add nuw nsw i64 %add.1, %zext.2
  %gep.2 = getelementptr inbounds i8, ptr %ptr, i64 3
  %ld3 = load i8, ptr %gep.2
  %zext.3 = zext i8 %ld3 to i64
  %add.3 = add nuw nsw i64 %add.2, %zext.3
  %gep.3 = getelementptr inbounds i8, ptr %ptr, i64 4
  %ld4 = load i8, ptr %gep.3
  %zext.4 = zext i8 %ld4 to i64
  %add.4 = add nuw nsw i64 %add.3, %zext.4
  %gep.4 = getelementptr inbounds i8, ptr %ptr, i64 5
  %ld5 = load i8, ptr %gep.4
  %zext.5 = zext i8 %ld5 to i64
  %add.5 = add nuw nsw i64 %add.4, %zext.5
  %gep.5 = getelementptr inbounds i8, ptr %ptr, i64 6
  %ld6 = load i8, ptr %gep.5
  %zext.6 = zext i8 %ld6 to i64
  %add.6 = add nuw nsw i64 %add.5, %zext.6
  %gep.6 = getelementptr inbounds i8, ptr %ptr, i64 7
  %ld7 = load i8, ptr %gep.6
  %zext.7 = zext i8 %ld7 to i64
  %add.7 = add nuw nsw i64 %add.6, %zext.7
  %gep.7 = getelementptr inbounds i8, ptr %ptr, i64 8
  %ld8 = load i8, ptr %gep.7
  %zext.8 = zext i8 %ld8 to i64
  %add.8 = add nuw nsw i64 %add.7, %zext.8
  %gep.8 = getelementptr inbounds i8, ptr %ptr, i64 9
  %ld9 = load i8, ptr %gep.8
  %zext.9 = zext i8 %ld9 to i64
  %add.9 = add nuw nsw i64 %add.8, %zext.9
  %gep.9 = getelementptr inbounds i8, ptr %ptr, i64 10
  %ld10 = load i8, ptr %gep.9
  %zext.10 = zext i8 %ld10 to i64
  %add.10 = add nuw nsw i64 %add.9, %zext.10
  %gep.10 = getelementptr inbounds i8, ptr %ptr, i64 11
  %ld11 = load i8, ptr %gep.10
  %zext.11 = zext i8 %ld11 to i64
  %add.11 = add nuw nsw i64 %add.10, %zext.11
  %gep.11 = getelementptr inbounds i8, ptr %ptr, i64 12
  %ld12 = load i8, ptr %gep.11
  %zext.12 = zext i8 %ld12 to i64
  %add.12 = add nuw nsw i64 %add.11, %zext.12
  %gep.12 = getelementptr inbounds i8, ptr %ptr, i64 13
  %ld13 = load i8, ptr %gep.12
  %zext.13 = zext i8 %ld13 to i64
  %add.13 = add nuw nsw i64 %add.12, %zext.13
  %gep.13 = getelementptr inbounds i8, ptr %ptr, i64 14
  %ld14 = load i8, ptr %gep.13
  %zext.14 = zext i8 %ld14 to i64
  %add.14 = add nuw nsw i64 %add.13, %zext.14
  %gep.14 = getelementptr inbounds i8, ptr %ptr, i64 15
  %ld15 = load i8, ptr %gep.14
  %zext.15 = zext i8 %ld15 to i64
  %add.15 = add nuw nsw i64 %add.14, %zext.15
  ret i64 %add.15
}

declare i32 @llvm.abs.i32(i32, i1)

define i32 @stride_sum_abs_diff(ptr %p, ptr %q, i64 %stride) {
; CHECK-LABEL: @stride_sum_abs_diff(
; CHECK-NEXT: [[P_2:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[STRIDE:%.*]]
; CHECK-NEXT: [[Q_2:%.*]] = getelementptr inbounds i32, ptr [[Q:%.*]], i64 [[STRIDE]]
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[P]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr [[Q]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr [[P_2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr [[Q_2]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> poison, <2 x i32> [[TMP1]], i64 0)
; CHECK-NEXT: [[TMP6:%.*]] = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> [[TMP5]], <2 x i32> [[TMP3]], i64 2)
; CHECK-NEXT: [[TMP7:%.*]] = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> poison, <2 x i32> [[TMP2]], i64 0)
; CHECK-NEXT: [[TMP8:%.*]] = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> [[TMP7]], <2 x i32> [[TMP4]], i64 2)
; CHECK-NEXT: [[TMP9:%.*]] = sub <4 x i32> [[TMP6]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[TMP9]], i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP10]])
; CHECK-NEXT: ret i32 [[TMP11]]
;
  %x.0 = load i32, ptr %p
  %y.0 = load i32, ptr %q
  %sub.0 = sub i32 %x.0, %y.0
  %abs.0 = tail call i32 @llvm.abs.i32(i32 %sub.0, i1 true)
  %p.1 = getelementptr inbounds i32, ptr %p, i64 1
  %x.1 = load i32, ptr %p.1
  %q.1 = getelementptr inbounds i32, ptr %q, i64 1
  %y.1 = load i32, ptr %q.1
  %sub.1 = sub i32 %x.1, %y.1
  %abs.1 = tail call i32 @llvm.abs.i32(i32 %sub.1, i1 true)
  %sum.0 = add i32 %abs.0, %abs.1
  %p.2 = getelementptr inbounds i32, ptr %p, i64 %stride
  %q.2 = getelementptr inbounds i32, ptr %q, i64 %stride
  %x.2 = load i32, ptr %p.2
  %y.2 = load i32, ptr %q.2
  %sub.2 = sub i32 %x.2, %y.2
  %abs.2 = tail call i32 @llvm.abs.i32(i32 %sub.2, i1 true)
  %sum.1 = add i32 %sum.0, %abs.2
  %p.3 = getelementptr inbounds i32, ptr %p.2, i64 1
  %x.3 = load i32, ptr %p.3
  %q.3 = getelementptr inbounds i32, ptr %q.2, i64 1
  %y.3 = load i32, ptr %q.3
  %sub.3 = sub i32 %x.3, %y.3
  %abs.3 = tail call i32 @llvm.abs.i32(i32 %sub.3, i1 true)
  %sum.2 = add i32 %sum.1, %abs.3
  ret i32 %sum.2
}

define i32 @reduce_sum_2arrays_a(ptr noalias %p, ptr noalias %q) {
; CHECK-LABEL: @reduce_sum_2arrays_a(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[Q:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.vector.insert.v8i8.v4i8(<8 x i8> poison, <4 x i8> [[TMP0]], i64 0)
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.vector.insert.v8i8.v4i8(<8 x i8> [[TMP2]], <4 x i8> [[TMP1]], i64 4)
; CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[TMP3]] to <8 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
; CHECK-NEXT: ret i32 [[TMP5]]
;
entry:
  %x.0 = load i8, ptr %p, align 1
  %conv = zext i8 %x.0 to i32
  %y.0 = load i8, ptr %q, align 1
  %conv3 = zext i8 %y.0 to i32
  %add4 = add nuw nsw i32 %conv, %conv3
  %arrayidx.1 = getelementptr inbounds i8, ptr %p, i64 1
  %x.1 = load i8, ptr %arrayidx.1, align 1
  %conv.1 = zext i8 %x.1 to i32
  %arrayidx2.1 = getelementptr inbounds i8, ptr %q, i64 1
  %y.1 = load i8, ptr %arrayidx2.1, align 1
  %conv3.1 = zext i8 %y.1 to i32
  %add.1 = add nuw nsw i32 %add4, %conv.1
  %add4.1 = add nuw nsw i32 %add.1, %conv3.1
  %arrayidx.2 = getelementptr inbounds i8, ptr %p, i64 2
  %x.2 = load i8, ptr %arrayidx.2, align 1
  %conv.2 = zext i8 %x.2 to i32
  %arrayidx2.2 = getelementptr inbounds i8, ptr %q, i64 2
  %y.2 = load i8, ptr %arrayidx2.2, align 1
  %conv3.2 = zext i8 %y.2 to i32
  %add.2 = add nuw nsw i32 %add4.1, %conv.2
  %add4.2 = add nuw nsw i32 %add.2, %conv3.2
  %arrayidx.3 = getelementptr inbounds i8, ptr %p, i64 3
  %x.3 = load i8, ptr %arrayidx.3, align 1
  %conv.3 = zext i8 %x.3 to i32
  %arrayidx2.3 = getelementptr inbounds i8, ptr %q, i64 3
  %y.3 = load i8, ptr %arrayidx2.3, align 1
  %conv3.3 = zext i8 %y.3 to i32
  %add.3 = add nuw nsw i32 %add4.2, %conv.3
  %add4.3 = add nuw nsw i32 %add.3, %conv3.3
  ret i32 %add4.3
}

define i32 @reduce_sum_2arrays_b(ptr noalias noundef %x, ptr noalias %y) {
; CHECK-LABEL: @reduce_sum_2arrays_b(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[X:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[Y:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.vector.insert.v8i8.v4i8(<8 x i8> poison, <4 x i8> [[TMP0]], i64 0)
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.vector.insert.v8i8.v4i8(<8 x i8> [[TMP2]], <4 x i8> [[TMP1]], i64 4)
; CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[TMP3]] to <8 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
; CHECK-NEXT: ret i32 [[TMP5]]
;
entry:
  %0 = load i8, ptr %x, align 1
  %conv = zext i8 %0 to i32
  %arrayidx.1 = getelementptr inbounds i8, ptr %x, i64 1
  %1 = load i8, ptr %arrayidx.1, align 1
  %conv.1 = zext i8 %1 to i32
  %add.1 = add nuw nsw i32 %conv, %conv.1
  %arrayidx.2 = getelementptr inbounds i8, ptr %x, i64 2
  %2 = load i8, ptr %arrayidx.2, align 1
  %conv.2 = zext i8 %2 to i32
  %add.2 = add nuw nsw i32 %add.1, %conv.2
  %arrayidx.3 = getelementptr inbounds i8, ptr %x, i64 3
  %3 = load i8, ptr %arrayidx.3, align 1
  %conv.3 = zext i8 %3 to i32
  %add.3 = add nuw nsw i32 %add.2, %conv.3
  %4 = load i8, ptr %y, align 1
  %conv9 = zext i8 %4 to i32
  %add10 = add nuw nsw i32 %add.3, %conv9
  %arrayidx8.1 = getelementptr inbounds i8, ptr %y, i64 1
  %5 = load i8, ptr %arrayidx8.1, align 1
  %conv9.1 = zext i8 %5 to i32
  %add10.1 = add nuw nsw i32 %add10, %conv9.1
  %arrayidx8.2 = getelementptr inbounds i8, ptr %y, i64 2
  %6 = load i8, ptr %arrayidx8.2, align 1
  %conv9.2 = zext i8 %6 to i32
  %add10.2 = add nuw nsw i32 %add10.1, %conv9.2
  %arrayidx8.3 = getelementptr inbounds i8, ptr %y, i64 3
  %7 = load i8, ptr %arrayidx8.3, align 1
  %conv9.3 = zext i8 %7 to i32
  %add10.3 = add nuw nsw i32 %add10.2, %conv9.3
  ret i32 %add10.3
}
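
; The remaining tests exercise FP reductions. For context: zvfbfmin only
; provides bf16<->f32 conversion instructions (no bf16 arithmetic), and RVV
; has no fmul reduction instruction, hence the negative tests below.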

; Shouldn't vectorize to a reduction because we can't promote it
define bfloat @fadd_4xbf16(ptr %p) {
; CHECK-LABEL: @fadd_4xbf16(
; CHECK-NEXT: [[X0:%.*]] = load bfloat, ptr [[P:%.*]], align 2
; CHECK-NEXT: [[P1:%.*]] = getelementptr bfloat, ptr [[P]], i32 1
; CHECK-NEXT: [[X1:%.*]] = load bfloat, ptr [[P1]], align 2
; CHECK-NEXT: [[P2:%.*]] = getelementptr bfloat, ptr [[P]], i32 2
; CHECK-NEXT: [[X2:%.*]] = load bfloat, ptr [[P2]], align 2
; CHECK-NEXT: [[P3:%.*]] = getelementptr bfloat, ptr [[P]], i32 3
; CHECK-NEXT: [[X3:%.*]] = load bfloat, ptr [[P3]], align 2
; CHECK-NEXT: [[R0:%.*]] = fadd fast bfloat [[X0]], [[X1]]
; CHECK-NEXT: [[R1:%.*]] = fadd fast bfloat [[R0]], [[X2]]
; CHECK-NEXT: [[R2:%.*]] = fadd fast bfloat [[R1]], [[X3]]
; CHECK-NEXT: ret bfloat [[R2]]
;
  %x0 = load bfloat, ptr %p
  %p1 = getelementptr bfloat, ptr %p, i32 1
  %x1 = load bfloat, ptr %p1
  %p2 = getelementptr bfloat, ptr %p, i32 2
  %x2 = load bfloat, ptr %p2
  %p3 = getelementptr bfloat, ptr %p, i32 3
  %x3 = load bfloat, ptr %p3
  %r0 = fadd fast bfloat %x0, %x1
  %r1 = fadd fast bfloat %r0, %x2
  %r2 = fadd fast bfloat %r1, %x3
  ret bfloat %r2
}

; Shouldn't vectorize to a reduction because there's no vfred{u,o}mul.vs
define bfloat @fmul_4xbf16(ptr %p) {
; CHECK-LABEL: @fmul_4xbf16(
; CHECK-NEXT: [[X0:%.*]] = load bfloat, ptr [[P:%.*]], align 2
; CHECK-NEXT: [[P1:%.*]] = getelementptr bfloat, ptr [[P]], i32 1
; CHECK-NEXT: [[X1:%.*]] = load bfloat, ptr [[P1]], align 2
; CHECK-NEXT: [[P2:%.*]] = getelementptr bfloat, ptr [[P]], i32 2
; CHECK-NEXT: [[X2:%.*]] = load bfloat, ptr [[P2]], align 2
; CHECK-NEXT: [[P3:%.*]] = getelementptr bfloat, ptr [[P]], i32 3
; CHECK-NEXT: [[X3:%.*]] = load bfloat, ptr [[P3]], align 2
; CHECK-NEXT: [[R0:%.*]] = fmul fast bfloat [[X0]], [[X1]]
; CHECK-NEXT: [[R1:%.*]] = fmul fast bfloat [[R0]], [[X2]]
; CHECK-NEXT: [[R2:%.*]] = fmul fast bfloat [[R1]], [[X3]]
; CHECK-NEXT: ret bfloat [[R2]]
;
  %x0 = load bfloat, ptr %p
  %p1 = getelementptr bfloat, ptr %p, i32 1
  %x1 = load bfloat, ptr %p1
  %p2 = getelementptr bfloat, ptr %p, i32 2
  %x2 = load bfloat, ptr %p2
  %p3 = getelementptr bfloat, ptr %p, i32 3
  %x3 = load bfloat, ptr %p3
  %r0 = fmul fast bfloat %x0, %x1
  %r1 = fmul fast bfloat %r0, %x2
  %r2 = fmul fast bfloat %r1, %x3
  ret bfloat %r2
}

; Shouldn't vectorize to a reduction on zvfhmin because we can't promote it
define half @fadd_4xf16(ptr %p) {
; ZVFHMIN-LABEL: @fadd_4xf16(
; ZVFHMIN-NEXT: [[X0:%.*]] = load half, ptr [[P:%.*]], align 2
; ZVFHMIN-NEXT: [[P1:%.*]] = getelementptr half, ptr [[P]], i32 1
; ZVFHMIN-NEXT: [[X1:%.*]] = load half, ptr [[P1]], align 2
; ZVFHMIN-NEXT: [[P2:%.*]] = getelementptr half, ptr [[P]], i32 2
; ZVFHMIN-NEXT: [[X2:%.*]] = load half, ptr [[P2]], align 2
; ZVFHMIN-NEXT: [[P3:%.*]] = getelementptr half, ptr [[P]], i32 3
; ZVFHMIN-NEXT: [[X3:%.*]] = load half, ptr [[P3]], align 2
; ZVFHMIN-NEXT: [[R0:%.*]] = fadd fast half [[X0]], [[X1]]
; ZVFHMIN-NEXT: [[R1:%.*]] = fadd fast half [[R0]], [[X2]]
; ZVFHMIN-NEXT: [[R2:%.*]] = fadd fast half [[R1]], [[X3]]
; ZVFHMIN-NEXT: ret half [[R2]]
;
; ZVFH-LABEL: @fadd_4xf16(
; ZVFH-NEXT: [[TMP1:%.*]] = load <4 x half>, ptr [[P:%.*]], align 2
; ZVFH-NEXT: [[TMP2:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> [[TMP1]])
; ZVFH-NEXT: ret half [[TMP2]]
;
  %x0 = load half, ptr %p
  %p1 = getelementptr half, ptr %p, i32 1
  %x1 = load half, ptr %p1
  %p2 = getelementptr half, ptr %p, i32 2
  %x2 = load half, ptr %p2
  %p3 = getelementptr half, ptr %p, i32 3
  %x3 = load half, ptr %p3
  %r0 = fadd fast half %x0, %x1
  %r1 = fadd fast half %r0, %x2
  %r2 = fadd fast half %r1, %x3
  ret half %r2
}

; Shouldn't vectorize to a reduction because there's no vfred{u,o}mul.vs
define half @fmul_4xf16(ptr %p) {
; CHECK-LABEL: @fmul_4xf16(
; CHECK-NEXT: [[X0:%.*]] = load half, ptr [[P:%.*]], align 2
; CHECK-NEXT: [[P1:%.*]] = getelementptr half, ptr [[P]], i32 1
; CHECK-NEXT: [[X1:%.*]] = load half, ptr [[P1]], align 2
; CHECK-NEXT: [[P2:%.*]] = getelementptr half, ptr [[P]], i32 2
; CHECK-NEXT: [[X2:%.*]] = load half, ptr [[P2]], align 2
; CHECK-NEXT: [[P3:%.*]] = getelementptr half, ptr [[P]], i32 3
; CHECK-NEXT: [[X3:%.*]] = load half, ptr [[P3]], align 2
; CHECK-NEXT: [[R0:%.*]] = fmul fast half [[X0]], [[X1]]
; CHECK-NEXT: [[R1:%.*]] = fmul fast half [[R0]], [[X2]]
; CHECK-NEXT: [[R2:%.*]] = fmul fast half [[R1]], [[X3]]
; CHECK-NEXT: ret half [[R2]]
;
  %x0 = load half, ptr %p
  %p1 = getelementptr half, ptr %p, i32 1
  %x1 = load half, ptr %p1
  %p2 = getelementptr half, ptr %p, i32 2
  %x2 = load half, ptr %p2
  %p3 = getelementptr half, ptr %p, i32 3
  %x3 = load half, ptr %p3
  %r0 = fmul fast half %x0, %x1
  %r1 = fmul fast half %r0, %x2
  %r2 = fmul fast half %r1, %x3
  ret half %r2
}