; xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/X86/cycle_dup.ll (revision 580210a0c938531ef9fd79f9ffedb93eeb2e66c2)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer,dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"

; C source this test was reduced from: four independent scalar recurrences
; (r, g, b, a) that the SLP vectorizer should fuse into one <4 x i32> phi/mul.
; int foo(int *A) {
;   int r = A[0], g = A[1], b = A[2], a = A[3];
;   for (int i=0; i < A[13]; i++) {
;     r*=18; g*=19; b*=12; a *=9;
;   }
;   A[0] = r; A[1] = g; A[2] = b; A[3] = a;
; }

; Scalar IR below feeds the SLP vectorizer; the autogenerated CHECK lines
; verify that the four parallel i32 recurrences are vectorized into a single
; <4 x i32> phi cycle (load, loop-carried mul, and final store all widened).
define i32 @foo(ptr nocapture %A) #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[A:%.*]], align 4
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 13
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT:    [[CMP24:%.*]] = icmp sgt i32 [[TMP2]], 0
; CHECK-NEXT:    br i1 [[CMP24]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_029:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = phi <4 x i32> [ [[TMP4:%.*]], [[FOR_BODY]] ], [ [[TMP1]], [[ENTRY]] ]
; CHECK-NEXT:    [[TMP4]] = mul nsw <4 x i32> [[TMP3]], <i32 18, i32 19, i32 12, i32 9>
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_029]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[TMP2]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[TMP5:%.*]] = phi <4 x i32> [ [[TMP1]], [[ENTRY]] ], [ [[TMP4]], [[FOR_BODY]] ]
; CHECK-NEXT:    store <4 x i32> [[TMP5]], ptr [[A]], align 4
; CHECK-NEXT:    ret i32 undef
;
entry:
  %0 = load i32, ptr %A, align 4
  %arrayidx1 = getelementptr inbounds i32, ptr %A, i64 1
  %1 = load i32, ptr %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 2
  %2 = load i32, ptr %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 3
  %3 = load i32, ptr %arrayidx3, align 4
  %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 13
  %4 = load i32, ptr %arrayidx4, align 4
  %cmp24 = icmp sgt i32 %4, 0
  br i1 %cmp24, label %for.body, label %for.end

for.body:                                         ; preds = %entry, %for.body
  %i.029 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %a.028 = phi i32 [ %mul7, %for.body ], [ %3, %entry ]
  %b.027 = phi i32 [ %mul6, %for.body ], [ %2, %entry ]
  %g.026 = phi i32 [ %mul5, %for.body ], [ %1, %entry ]
  %r.025 = phi i32 [ %mul, %for.body ], [ %0, %entry ]
  %mul = mul nsw i32 %r.025, 18
  %mul5 = mul nsw i32 %g.026, 19
  %mul6 = mul nsw i32 %b.027, 12
  %mul7 = mul nsw i32 %a.028, 9
  %inc = add nsw i32 %i.029, 1
  %cmp = icmp slt i32 %inc, %4
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  %a.0.lcssa = phi i32 [ %3, %entry ], [ %mul7, %for.body ]
  %b.0.lcssa = phi i32 [ %2, %entry ], [ %mul6, %for.body ]
  %g.0.lcssa = phi i32 [ %1, %entry ], [ %mul5, %for.body ]
  %r.0.lcssa = phi i32 [ %0, %entry ], [ %mul, %for.body ]
  store i32 %r.0.lcssa, ptr %A, align 4
  store i32 %g.0.lcssa, ptr %arrayidx1, align 4
  store i32 %b.0.lcssa, ptr %arrayidx2, align 4
  store i32 %a.0.lcssa, ptr %arrayidx3, align 4
  ret i32 undef
}
73
74
75