; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer,dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
target triple = "i386-apple-macosx10.8.0"

; int foo(ptr A, int n, int m) {
;   double sum = 0, v1 = 2, v0 = 3;
;   for (int i=0; i < n; ++i)
;     sum += 7*A[i*2] + 7*A[i*2+1];
;   return sum;
; }
define i32 @reduce(ptr nocapture %A, i32 %n, i32 %m) {
; CHECK-LABEL: @reduce(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP13]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_015:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[SUM_014:%.*]] = phi double [ [[ADD6:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i32 [[I_015]], 1
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i32 [[MUL]]
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = fmul <2 x double> [[TMP0]], splat (double 7.000000e+00)
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
; CHECK-NEXT:    [[ADD5:%.*]] = fadd double [[TMP2]], [[TMP3]]
; CHECK-NEXT:    [[ADD6]] = fadd double [[SUM_014]], [[ADD5]]
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_015]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]], label [[FOR_BODY]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[PHITMP:%.*]] = fptosi double [[ADD6]] to i32
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ [[PHITMP]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
;
entry:
  %cmp13 = icmp sgt i32 %n, 0
  br i1 %cmp13, label %for.body, label %for.end

for.body:                                         ; preds = %entry, %for.body
  %i.015 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %sum.014 = phi double [ %add6, %for.body ], [ 0.000000e+00, %entry ]
  %mul = shl nsw i32 %i.015, 1
  %arrayidx = getelementptr inbounds double, ptr %A, i32 %mul
  %0 = load double, ptr %arrayidx, align 4
  %mul1 = fmul double %0, 7.000000e+00
  %add12 = or disjoint i32 %mul, 1
  %arrayidx3 = getelementptr inbounds double, ptr %A, i32 %add12
  %1 = load double, ptr %arrayidx3, align 4
  %mul4 = fmul double %1, 7.000000e+00
  %add5 = fadd double %mul1, %mul4
  %add6 = fadd double %sum.014, %add5
  %inc = add nsw i32 %i.015, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.for.end_crit_edge, label %for.body

for.cond.for.end_crit_edge:                       ; preds = %for.body
  %phitmp = fptosi double %add6 to i32
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  %sum.0.lcssa = phi i32 [ %phitmp, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  ret i32 %sum.0.lcssa
}

; PR43948 - https://bugs.llvm.org/show_bug.cgi?id=43948
; The extra use of a non-vectorized element of a reduction must not be killed.

define i32 @horiz_max_multiple_uses(ptr %x, ptr %p) {
; CHECK-LABEL: @horiz_max_multiple_uses(
; CHECK-NEXT:    [[X4:%.*]] = getelementptr [32 x i32], ptr [[X:%.*]], i64 0, i64 4
; CHECK-NEXT:    [[X5:%.*]] = getelementptr [32 x i32], ptr [[X]], i64 0, i64 5
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[X]], align 4
; CHECK-NEXT:    [[T4:%.*]] = load i32, ptr [[X4]], align 4
; CHECK-NEXT:    [[T5:%.*]] = load i32, ptr [[X5]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT:    [[MAX_ROOT_CMP:%.*]] = icmp sgt i32 [[TMP2]], [[T4]]
; CHECK-NEXT:    [[MAX_ROOT_SEL:%.*]] = select i1 [[MAX_ROOT_CMP]], i32 [[TMP2]], i32 [[T4]]
; CHECK-NEXT:    [[C012345:%.*]] = icmp sgt i32 [[MAX_ROOT_SEL]], [[T5]]
; CHECK-NEXT:    [[T17:%.*]] = select i1 [[C012345]], i32 [[MAX_ROOT_SEL]], i32 [[T5]]
; CHECK-NEXT:    [[THREE_OR_FOUR:%.*]] = select i1 [[MAX_ROOT_CMP]], i32 3, i32 4
; CHECK-NEXT:    store i32 [[THREE_OR_FOUR]], ptr [[P:%.*]], align 8
; CHECK-NEXT:    ret i32 [[T17]]
;
  %x1 = getelementptr [32 x i32], ptr %x, i64 0, i64 1
  %x2 = getelementptr [32 x i32], ptr %x, i64 0, i64 2
  %x3 = getelementptr [32 x i32], ptr %x, i64 0, i64 3
  %x4 = getelementptr [32 x i32], ptr %x, i64 0, i64 4
  %x5 = getelementptr [32 x i32], ptr %x, i64 0, i64 5

  %t0 = load i32, ptr %x
  %t1 = load i32, ptr %x1
  %t2 = load i32, ptr %x2
  %t3 = load i32, ptr %x3
  %t4 = load i32, ptr %x4
  %t5 = load i32, ptr %x5

  %c01 = icmp sgt i32 %t0, %t1
  %s5 = select i1 %c01, i32 %t0, i32 %t1
  %c012 = icmp sgt i32 %s5, %t2
  %t8 = select i1 %c012, i32 %s5, i32 %t2
  %c0123 = icmp sgt i32 %t8, %t3
  %rdx4 = select i1 %c0123, i32 %t8, i32 %t3
  %MAX_ROOT_CMP = icmp sgt i32 %rdx4, %t4
  %MAX_ROOT_SEL = select i1 %MAX_ROOT_CMP, i32 %rdx4, i32 %t4
  %c012345 = icmp sgt i32 %MAX_ROOT_SEL, %t5
  %t17 = select i1 %c012345, i32 %MAX_ROOT_SEL, i32 %t5
  %three_or_four = select i1 %MAX_ROOT_CMP, i32 3, i32 4
  store i32 %three_or_four, ptr %p, align 8
  ret i32 %t17
}

; This is a miscompile (see the undef operand) and/or test for invalid IR.

define i1 @bad_insertpoint_rdx(ptr %p) #0 {
; CHECK-LABEL: @bad_insertpoint_rdx(
; CHECK-NEXT:    [[T0:%.*]] = load i32, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[CMP23:%.*]] = icmp sgt i32 [[T0]], 0
; CHECK-NEXT:    [[SPEC_SELECT:%.*]] = select i1 [[CMP23]], i32 [[T0]], i32 0
; CHECK-NEXT:    [[ARRAYIDX22_1:%.*]] = getelementptr inbounds [8 x i32], ptr [[P]], i64 0, i64 1
; CHECK-NEXT:    [[T1:%.*]] = load i32, ptr [[ARRAYIDX22_1]], align 4
; CHECK-NEXT:    [[CMP23_1:%.*]] = icmp sgt i32 [[T1]], [[SPEC_SELECT]]
; CHECK-NEXT:    [[SPEC_STORE_SELECT87:%.*]] = zext i1 [[CMP23_1]] to i32
; CHECK-NEXT:    [[SPEC_SELECT88:%.*]] = select i1 [[CMP23_1]], i32 [[T1]], i32 [[SPEC_SELECT]]
; CHECK-NEXT:    [[CMP23_2:%.*]] = icmp sgt i32 [[SPEC_STORE_SELECT87]], [[SPEC_SELECT88]]
; CHECK-NEXT:    ret i1 [[CMP23_2]]
;
  %t0 = load i32, ptr %p, align 16
  %cmp23 = icmp sgt i32 %t0, 0
  %spec.select = select i1 %cmp23, i32 %t0, i32 0
  %arrayidx22.1 = getelementptr inbounds [8 x i32], ptr %p, i64 0, i64 1
  %t1 = load i32, ptr %arrayidx22.1, align 4
  %cmp23.1 = icmp sgt i32 %t1, %spec.select
  %spec.store.select87 = zext i1 %cmp23.1 to i32
  %spec.select88 = select i1 %cmp23.1, i32 %t1, i32 %spec.select
  %cmp23.2 = icmp sgt i32 %spec.store.select87, %spec.select88
  ret i1 %cmp23.2
}