xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/reduction-modified-values.ll (revision 7773243d9916f98ba0ffce0c3a960e4aa9f03e81)
1*7773243dSElvina Yakubova; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2*7773243dSElvina Yakubova; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
3*7773243dSElvina Yakubova; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
4*7773243dSElvina Yakubova
; Verifies that the SLP vectorizer collapses the scalar extract/or/add chain
; below into two @llvm.vector.reduce.add.v4i32 calls — one over the or'ed
; lane values and one over the shuffled vector itself — joined by a single
; scalar add, instead of eight scalar operations.
5*7773243dSElvina Yakubovadefine i32 @test() {
6*7773243dSElvina Yakubova; CHECK-LABEL: @test(
7*7773243dSElvina Yakubova; CHECK-NEXT:  bb:
8*7773243dSElvina Yakubova; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
9*7773243dSElvina Yakubova; CHECK-NEXT:    [[TMP1:%.*]] = or <4 x i32> [[TMP0]], zeroinitializer
10*7773243dSElvina Yakubova; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
11*7773243dSElvina Yakubova; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP0]])
12*7773243dSElvina Yakubova; CHECK-NEXT:    [[OP_RDX:%.*]] = add i32 [[TMP2]], [[TMP3]]
13*7773243dSElvina Yakubova; CHECK-NEXT:    ret i32 [[OP_RDX]]
14*7773243dSElvina Yakubova;
; Scalar input: all four lanes of %0 are extracted, each or'ed with 0, and the
; eight resulting values (four lanes plus four or results) are summed through
; the %inst458..%inst518 add chain. The reduction thus covers both the raw
; lane values and the modified (or'ed) values, which is the case the test
; name refers to and which the expected output handles with two reductions.
15*7773243dSElvina Yakubovabb:
16*7773243dSElvina Yakubova  %0 = shufflevector <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
17*7773243dSElvina Yakubova  %1 = extractelement <4 x i32> %0, i32 3
18*7773243dSElvina Yakubova  %2 = extractelement <4 x i32> %0, i32 2
19*7773243dSElvina Yakubova  %3 = extractelement <4 x i32> %0, i32 1
20*7773243dSElvina Yakubova  %4 = extractelement <4 x i32> %0, i32 0
21*7773243dSElvina Yakubova  %inst514 = or i32 %4, 0
22*7773243dSElvina Yakubova  %inst494 = or i32 %3, 0
23*7773243dSElvina Yakubova  %inst474 = or i32 %2, 0
24*7773243dSElvina Yakubova  %inst454 = or i32 %1, 0
25*7773243dSElvina Yakubova  %inst458 = add i32 %1, %inst454
26*7773243dSElvina Yakubova  %inst477 = add i32 %inst458, %2
27*7773243dSElvina Yakubova  %inst478 = add i32 %inst477, %inst474
28*7773243dSElvina Yakubova  %inst497 = add i32 %inst478, %3
29*7773243dSElvina Yakubova  %inst498 = add i32 %inst497, %inst494
30*7773243dSElvina Yakubova  %inst517 = add i32 %inst498, %4
31*7773243dSElvina Yakubova  %inst518 = add i32 %inst517, %inst514
32*7773243dSElvina Yakubova  ret i32 %inst518
33*7773243dSElvina Yakubova}
34
34