xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/SystemZ/reorder-same-node.ll (revision b04dd5d187306df9cc7e53ec5a84c1324be63eb8)
1*b04dd5d1SAlexey Bataev; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
2*b04dd5d1SAlexey Bataev; RUN: opt -S --passes=slp-vectorizer -mtriple=s390x-unknown-linux -mcpu=z16 < %s | FileCheck %s
3*b04dd5d1SAlexey Bataev
4*b04dd5d1SAlexey Bataevdefine void @test() {
5*b04dd5d1SAlexey Bataev; CHECK-LABEL: define void @test(
6*b04dd5d1SAlexey Bataev; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
7*b04dd5d1SAlexey Bataev; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.xor.v8i64(<8 x i64> zeroinitializer)
8*b04dd5d1SAlexey Bataev; CHECK-NEXT:    store i64 [[TMP1]], ptr null, align 8
9*b04dd5d1SAlexey Bataev; CHECK-NEXT:    ret void
10*b04dd5d1SAlexey Bataev;
11*b04dd5d1SAlexey Bataev  %1 = zext i8 0 to i32
12*b04dd5d1SAlexey Bataev  %2 = lshr i32 0, %1
13*b04dd5d1SAlexey Bataev  %3 = icmp ult i32 %2, 0
14*b04dd5d1SAlexey Bataev  %4 = shl i32 0, %1
15*b04dd5d1SAlexey Bataev  %5 = and i32 %4, 0
16*b04dd5d1SAlexey Bataev  %narrow = select i1 %3, i32 0, i32 %5
17*b04dd5d1SAlexey Bataev  %6 = zext i32 %narrow to i64
18*b04dd5d1SAlexey Bataev  %7 = zext i8 0 to i32
19*b04dd5d1SAlexey Bataev  %8 = lshr i32 0, %7
20*b04dd5d1SAlexey Bataev  %9 = icmp ult i32 %8, 0
21*b04dd5d1SAlexey Bataev  %10 = shl i32 0, %7
22*b04dd5d1SAlexey Bataev  %11 = and i32 %10, 0
23*b04dd5d1SAlexey Bataev  %narrow.1 = select i1 %9, i32 0, i32 %11
24*b04dd5d1SAlexey Bataev  %12 = zext i32 %narrow.1 to i64
25*b04dd5d1SAlexey Bataev  %13 = xor i64 %6, %12
26*b04dd5d1SAlexey Bataev  %14 = zext i8 0 to i32
27*b04dd5d1SAlexey Bataev  %15 = lshr i32 0, %14
28*b04dd5d1SAlexey Bataev  %16 = icmp ult i32 %15, 0
29*b04dd5d1SAlexey Bataev  %17 = shl i32 0, %14
30*b04dd5d1SAlexey Bataev  %18 = and i32 %17, 0
31*b04dd5d1SAlexey Bataev  %narrow.2 = select i1 %16, i32 0, i32 %18
32*b04dd5d1SAlexey Bataev  %19 = zext i32 %narrow.2 to i64
33*b04dd5d1SAlexey Bataev  %20 = xor i64 %13, %19
34*b04dd5d1SAlexey Bataev  %21 = icmp ult i32 %8, 0
35*b04dd5d1SAlexey Bataev  %22 = shl i32 0, %7
36*b04dd5d1SAlexey Bataev  %23 = and i32 %22, 0
37*b04dd5d1SAlexey Bataev  %narrow.3 = select i1 %21, i32 0, i32 %23
38*b04dd5d1SAlexey Bataev  %24 = zext i32 %narrow.3 to i64
39*b04dd5d1SAlexey Bataev  %25 = xor i64 %20, %24
40*b04dd5d1SAlexey Bataev  %26 = icmp ult i32 %15, 0
41*b04dd5d1SAlexey Bataev  %27 = shl i32 0, %14
42*b04dd5d1SAlexey Bataev  %28 = and i32 %27, 0
43*b04dd5d1SAlexey Bataev  %narrow.4 = select i1 %26, i32 0, i32 %28
44*b04dd5d1SAlexey Bataev  %29 = zext i32 %narrow.4 to i64
45*b04dd5d1SAlexey Bataev  %30 = xor i64 %25, %29
46*b04dd5d1SAlexey Bataev  %31 = icmp ult i32 %8, 0
47*b04dd5d1SAlexey Bataev  %32 = shl i32 0, %7
48*b04dd5d1SAlexey Bataev  %33 = and i32 %32, 0
49*b04dd5d1SAlexey Bataev  %narrow.5 = select i1 %31, i32 0, i32 %33
50*b04dd5d1SAlexey Bataev  %34 = zext i32 %narrow.5 to i64
51*b04dd5d1SAlexey Bataev  %35 = xor i64 %30, %34
52*b04dd5d1SAlexey Bataev  %36 = icmp ult i32 %15, 0
53*b04dd5d1SAlexey Bataev  %37 = shl i32 0, %14
54*b04dd5d1SAlexey Bataev  %38 = and i32 %37, 0
55*b04dd5d1SAlexey Bataev  %narrow.6 = select i1 %36, i32 0, i32 %38
56*b04dd5d1SAlexey Bataev  %39 = zext i32 %narrow.6 to i64
57*b04dd5d1SAlexey Bataev  %40 = xor i64 %35, %39
58*b04dd5d1SAlexey Bataev  %41 = icmp ult i32 %8, 0
59*b04dd5d1SAlexey Bataev  %42 = shl i32 0, %7
60*b04dd5d1SAlexey Bataev  %43 = and i32 %42, 0
61*b04dd5d1SAlexey Bataev  %narrow.7 = select i1 %41, i32 0, i32 %43
62*b04dd5d1SAlexey Bataev  %44 = zext i32 %narrow.7 to i64
63*b04dd5d1SAlexey Bataev  %45 = xor i64 %40, %44
64*b04dd5d1SAlexey Bataev  store i64 %45, ptr null, align 8
65*b04dd5d1SAlexey Bataev  ret void
66*b04dd5d1SAlexey Bataev}
67*b04dd5d1SAlexey Bataev
68