; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}

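; The reduction below sums the same loaded scalars multiple times in a shuffled
; order (elt_1 x2, elt_2 x3, elt_3 x2, elt_4 x1). The CHECK lines expect SLP to
; collapse this into a single <4 x i32> load, a multiply by the per-lane use
; counts <2, 3, 2, 1>, and an @llvm.vector.reduce.add call.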
define i32 @test() {
; CHECK-LABEL: define i32 @test() {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SQ:%.*]] = alloca [64 x i32], i32 0, align 16
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [64 x i32], ptr [[SQ]], i64 0, i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = mul <4 x i32> [[TMP1]], <i32 2, i32 3, i32 2, i32 1>
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT:    ret i32 [[TMP3]]
;
entry:
  %sq = alloca [64 x i32], i32 0, align 16
  %0 = getelementptr inbounds [64 x i32], ptr %sq, i64 0, i64 1
  %elt_1 = load i32, ptr %0, align 4
  %1 = getelementptr [64 x i32], ptr %sq, i64 0, i64 2
  %elt_2 = load i32, ptr %1, align 8
  %2 = getelementptr [64 x i32], ptr %sq, i64 0, i64 3
  %elt_3 = load i32, ptr %2, align 4
  %3 = getelementptr [64 x i32], ptr %sq, i64 0, i64 4
  %elt_4 = load i32, ptr %3, align 16

  %4 = add i32 %elt_2, %elt_3
  %5 = add i32 %4, %elt_2
  %6 = add i32 %5, %elt_1
  %7 = add i32 %6, %elt_4
  %8 = add i32 %7, %elt_3
  %9 = add i32 %8, %elt_2
  %10 = add i32 %9, %elt_1

  ret i32 %10
}