; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: %if x86-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s %}
; RUN: %if aarch64-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu | FileCheck %s %}
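; Each lane computes (b[i] + c[i]) op (d[i] + e[i]), where op alternates
; between add (lanes 0 and 2) and sub (lanes 1 and 3). The SLP vectorizer
; should turn this into <4 x i32> add/sub pairs blended by a shufflevector.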

@b = common global [4 x i32] zeroinitializer, align 16
@c = common global [4 x i32] zeroinitializer, align 16
@d = common global [4 x i32] zeroinitializer, align 16
@e = common global [4 x i32] zeroinitializer, align 16
@a = common global [4 x i32] zeroinitializer, align 16
@fb = common global [4 x float] zeroinitializer, align 16
@fc = common global [4 x float] zeroinitializer, align 16
@fa = common global [4 x float] zeroinitializer, align 16
@fd = common global [4 x float] zeroinitializer, align 16

define void @addsub() {
; CHECK-LABEL: @addsub(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[BB1:%.*]]
; CHECK:       bb1:
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr @b, align 16
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @c, align 16
; CHECK-NEXT:    [[TMP2:%.*]] = add nsw <4 x i32> [[TMP0]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr @d, align 16
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr @e, align 16
; CHECK-NEXT:    [[TMP5:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = add nsw <4 x i32> [[TMP2]], [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = sub nsw <4 x i32> [[TMP2]], [[TMP5]]
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
; CHECK-NEXT:    store <4 x i32> [[TMP8]], ptr @a, align 16
; CHECK-NEXT:    ret void
;
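; Scalar input: four independent add/sub chains, one per element of @a.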
entry:
  br label %bb1

bb1:
  %0 = load i32, ptr @b, align 16
  %1 = load i32, ptr @c, align 16
  %add = add nsw i32 %0, %1
  %2 = load i32, ptr @d, align 16
  %3 = load i32, ptr @e, align 16
  %add1 = add nsw i32 %2, %3
  %add2 = add nsw i32 %add, %add1
  store i32 %add2, ptr @a, align 16
  %4 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @b, i64 0, i64 1), align 4
  %5 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @c, i64 0, i64 1), align 4
  %add3 = add nsw i32 %4, %5
  %6 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @d, i64 0, i64 1), align 4
  %7 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @e, i64 0, i64 1), align 4
  %add4 = add nsw i32 %6, %7
  %sub = sub nsw i32 %add3, %add4
  store i32 %sub, ptr getelementptr inbounds ([4 x i32], ptr @a, i64 0, i64 1), align 4
  %8 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @b, i64 0, i64 2), align 8
  %9 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @c, i64 0, i64 2), align 8
  %add5 = add nsw i32 %8, %9
  %10 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @d, i64 0, i64 2), align 8
  %11 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @e, i64 0, i64 2), align 8
  %add6 = add nsw i32 %10, %11
  %add7 = add nsw i32 %add5, %add6
  store i32 %add7, ptr getelementptr inbounds ([4 x i32], ptr @a, i64 0, i64 2), align 8
  %12 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @b, i64 0, i64 3), align 4
  %13 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @c, i64 0, i64 3), align 4
  %add8 = add nsw i32 %12, %13
  %14 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @d, i64 0, i64 3), align 4
  %15 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @e, i64 0, i64 3), align 4
  %add9 = add nsw i32 %14, %15
  %sub10 = sub nsw i32 %add8, %add9
  store i32 %sub10, ptr getelementptr inbounds ([4 x i32], ptr @a, i64 0, i64 3), align 4
  ret void
}