; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown-linux -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=x86-64-v2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE4
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX

@arr = local_unnamed_addr global [32 x i32] zeroinitializer, align 16

declare i32 @llvm.smax.i32(i32, i32)

define i32 @smax_v2i32(i32) {
; CHECK-LABEL: @smax_v2i32(
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @arr, align 16
; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 1), align 4
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP2]], i32 [[TMP3]])
; CHECK-NEXT:    ret i32 [[TMP4]]
;
  %2 = load i32, ptr @arr, align 16
  %3 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 1), align 4
  %4 = call i32 @llvm.smax.i32(i32 %2, i32 %3)
  ret i32 %4
}

define i32 @smax_v4i32(i32) {
; SSE2-LABEL: @smax_v4i32(
; SSE2-NEXT:    [[TMP2:%.*]] = load i32, ptr @arr, align 16
; SSE2-NEXT:    [[TMP3:%.*]] = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 1), align 4
; SSE2-NEXT:    [[TMP4:%.*]] = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 2), align 8
; SSE2-NEXT:    [[TMP5:%.*]] = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 3), align 4
; SSE2-NEXT:    [[TMP6:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP2]], i32 [[TMP3]])
; SSE2-NEXT:    [[TMP7:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP6]], i32 [[TMP4]])
; SSE2-NEXT:    [[TMP8:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP7]], i32 [[TMP5]])
; SSE2-NEXT:    ret i32 [[TMP8]]
;
; SSE4-LABEL: @smax_v4i32(
; SSE4-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr @arr, align 16
; SSE4-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP2]])
; SSE4-NEXT:    ret i32 [[TMP3]]
;
; AVX-LABEL: @smax_v4i32(
; AVX-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr @arr, align 16
; AVX-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP2]])
; AVX-NEXT:    ret i32 [[TMP3]]
;
  %2 = load i32, ptr @arr, align 16
  %3 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 1), align 4
  %4 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 2), align 8
  %5 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 3), align 4
  %6 = call i32 @llvm.smax.i32(i32 %2, i32 %3)
  %7 = call i32 @llvm.smax.i32(i32 %6, i32 %4)
  %8 = call i32 @llvm.smax.i32(i32 %7, i32 %5)
  ret i32 %8
}

define i32 @smax_v8i32(i32) {
; CHECK-LABEL: @smax_v8i32(
; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr @arr, align 16
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %2 = load i32, ptr @arr, align 16
  %3 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 1), align 4
  %4 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 2), align 8
  %5 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 3), align 4
  %6 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 4), align 16
  %7 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 5), align 4
  %8 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 6), align 8
  %9 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 7), align 4
  %10 = call i32 @llvm.smax.i32(i32 %2, i32 %3)
  %11 = call i32 @llvm.smax.i32(i32 %10, i32 %4)
  %12 = call i32 @llvm.smax.i32(i32 %11, i32 %5)
  %13 = call i32 @llvm.smax.i32(i32 %12, i32 %6)
  %14 = call i32 @llvm.smax.i32(i32 %13, i32 %7)
  %15 = call i32 @llvm.smax.i32(i32 %14, i32 %8)
  %16 = call i32 @llvm.smax.i32(i32 %15, i32 %9)
  ret i32 %16
}

define i32 @smax_v16i32(i32) {
; CHECK-LABEL: @smax_v16i32(
; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr @arr, align 16
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> [[TMP2]])
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %2 = load i32, ptr @arr, align 16
  %3 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 1), align 4
  %4 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 2), align 8
  %5 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 3), align 4
  %6 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 4), align 16
  %7 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 5), align 4
  %8 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 6), align 8
  %9 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 7), align 4
  %10 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 8), align 16
  %11 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 9), align 4
  %12 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 10), align 8
  %13 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 11), align 4
  %14 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 12), align 16
  %15 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 13), align 4
  %16 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 14), align 8
  %17 = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 15), align 4
  %18 = call i32 @llvm.smax.i32(i32 %2, i32 %3)
  %19 = call i32 @llvm.smax.i32(i32 %18, i32 %4)
  %20 = call i32 @llvm.smax.i32(i32 %19, i32 %5)
  %21 = call i32 @llvm.smax.i32(i32 %20, i32 %6)
  %22 = call i32 @llvm.smax.i32(i32 %21, i32 %7)
  %23 = call i32 @llvm.smax.i32(i32 %22, i32 %8)
  %24 = call i32 @llvm.smax.i32(i32 %23, i32 %9)
  %25 = call i32 @llvm.smax.i32(i32 %24, i32 %10)
  %26 = call i32 @llvm.smax.i32(i32 %25, i32 %11)
  %27 = call i32 @llvm.smax.i32(i32 %26, i32 %12)
  %28 = call i32 @llvm.smax.i32(i32 %27, i32 %13)
  %29 = call i32 @llvm.smax.i32(i32 %28, i32 %14)
  %30 = call i32 @llvm.smax.i32(i32 %29, i32 %15)
  %31 = call i32 @llvm.smax.i32(i32 %30, i32 %16)
  %32 = call i32 @llvm.smax.i32(i32 %31, i32 %17)
  ret i32 %32
}