xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll (revision 38fffa630ee80163dc65e759392ad29798905679)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -passes=slp-vectorizer,dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
3
4target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
5target triple = "x86_64-apple-macosx10.7.0"
6
7@.str = private unnamed_addr constant [6 x i8] c"bingo\00", align 1
8
9define void @reduce_compare(ptr nocapture %A, i32 %n) {
; Each loop iteration builds two independent scalar chains over an adjacent
; pair of doubles:
;   conv*A[2i]*7 + 5   and   conv*A[2i+1]*4 + 9
; and branches on an fcmp of the two results. The CHECK lines verify that SLP
; fuses the pair of loads/fmuls/fadds into <2 x double> operations, then
; extracts both lanes to feed the scalar compare.
10; CHECK-LABEL: @reduce_compare(
11; CHECK-NEXT:  entry:
12; CHECK-NEXT:    [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
13; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> poison, double [[CONV]], i32 0
14; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> zeroinitializer
15; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
16; CHECK:       for.body:
17; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
18; CHECK-NEXT:    [[TMP2:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
19; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP2]]
20; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
21; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]]
22; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP4]], <double 7.000000e+00, double 4.000000e+00>
23; CHECK-NEXT:    [[TMP6:%.*]] = fadd <2 x double> [[TMP5]], <double 5.000000e+00, double 9.000000e+00>
24; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x double> [[TMP6]], i32 0
25; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x double> [[TMP6]], i32 1
26; CHECK-NEXT:    [[CMP11:%.*]] = fcmp ogt double [[TMP7]], [[TMP8]]
27; CHECK-NEXT:    br i1 [[CMP11]], label [[IF_THEN:%.*]], label [[FOR_INC]]
28; CHECK:       if.then:
29; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 (ptr, ...) @printf(ptr @.str)
30; CHECK-NEXT:    br label [[FOR_INC]]
31; CHECK:       for.inc:
32; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
33; CHECK-NEXT:    [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
34; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 100
35; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
36; CHECK:       for.end:
37; CHECK-NEXT:    ret void
38;
39entry:
40  %conv = sitofp i32 %n to double
41  br label %for.body
42
43for.body:                                         ; preds = %for.inc, %entry
44  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
; %0 = 2*i (index of the even element); %2 = 2*i+1 below, so the two loads
; are consecutive in memory — the precondition for forming one vector load.
45  %0 = shl nsw i64 %indvars.iv, 1
46  %arrayidx = getelementptr inbounds double, ptr %A, i64 %0
47  %1 = load double, ptr %arrayidx, align 8
48  %mul1 = fmul double %conv, %1
49  %mul2 = fmul double %mul1, 7.000000e+00
50  %add = fadd double %mul2, 5.000000e+00
51  %2 = or disjoint i64 %0, 1
52  %arrayidx6 = getelementptr inbounds double, ptr %A, i64 %2
53  %3 = load double, ptr %arrayidx6, align 8
54  %mul8 = fmul double %conv, %3
55  %mul9 = fmul double %mul8, 4.000000e+00
56  %add10 = fadd double %mul9, 9.000000e+00
; Single compare joining the two chains — the only scalar user of the lanes.
57  %cmp11 = fcmp ogt double %add, %add10
58  br i1 %cmp11, label %if.then, label %for.inc
59
60if.then:                                          ; preds = %for.body
61  %call = tail call i32 (ptr, ...) @printf(ptr @.str)
62  br label %for.inc
63
64for.inc:                                          ; preds = %for.body, %if.then
65  %indvars.iv.next = add i64 %indvars.iv, 1
66  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
67  %exitcond = icmp eq i32 %lftr.wideiv, 100
68  br i1 %exitcond, label %for.end, label %for.body
69
70for.end:                                          ; preds = %for.inc
71  ret void
72}
73
74declare i32 @printf(ptr nocapture, ...)
75
76; PR41312 - the order of the reduction ops should not prevent forming a reduction.
77; The 'wrong' member of the reduction requires a greater cost if grouped with the
78; other candidates in the reduction because it does not have matching predicate
79; and/or constant operand.
80
81define float @merge_anyof_v4f32_wrong_first(<4 x float> %x) {
; PR41312 case: the mismatched compare (%cmp3wrong, 'olt 42.0') is the FIRST
; operand of the or-chain. The CHECK lines verify it is peeled out as a scalar
; while the four matching 'ogt 1.0' compares become a v4i1 or-reduction.
82; CHECK-LABEL: @merge_anyof_v4f32_wrong_first(
83; CHECK-NEXT:    [[X3:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3
84; CHECK-NEXT:    [[CMP3WRONG:%.*]] = fcmp olt float [[X3]], 4.200000e+01
85; CHECK-NEXT:    [[TMP1:%.*]] = fcmp ogt <4 x float> [[X]], splat (float 1.000000e+00)
86; CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP1]])
87; CHECK-NEXT:    [[OP_RDX:%.*]] = or i1 [[TMP2]], [[CMP3WRONG]]
88; CHECK-NEXT:    [[R:%.*]] = select i1 [[OP_RDX]], float -1.000000e+00, float 1.000000e+00
89; CHECK-NEXT:    ret float [[R]]
90;
91  %x0 = extractelement <4 x float> %x, i32 0
92  %x1 = extractelement <4 x float> %x, i32 1
93  %x2 = extractelement <4 x float> %x, i32 2
94  %x3 = extractelement <4 x float> %x, i32 3
; Odd one out: different predicate (olt) and constant (42.0) from the rest.
95  %cmp3wrong = fcmp olt float %x3, 42.0
96  %cmp0 = fcmp ogt float %x0, 1.0
97  %cmp1 = fcmp ogt float %x1, 1.0
98  %cmp2 = fcmp ogt float %x2, 1.0
99  %cmp3 = fcmp ogt float %x3, 1.0
; %cmp3wrong enters the or-chain first, ahead of the uniform compares.
100  %or03 = or i1 %cmp0, %cmp3wrong
101  %or031 = or i1 %or03, %cmp1
102  %or0312 = or i1 %or031, %cmp2
103  %or03123 = or i1 %or0312, %cmp3
104  %r = select i1 %or03123, float -1.0, float 1.0
105  ret float %r
106}
107
108define float @merge_anyof_v4f32_wrong_last(<4 x float> %x) {
; Same as merge_anyof_v4f32_wrong_first, but the mismatched compare
; (%cmp3wrong) is the LAST operand of the or-chain. Expected output is
; identical: scalar %cmp3wrong or'd with a v4i1 or-reduction of the four
; uniform 'ogt 1.0' compares.
109; CHECK-LABEL: @merge_anyof_v4f32_wrong_last(
110; CHECK-NEXT:    [[X3:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3
111; CHECK-NEXT:    [[CMP3WRONG:%.*]] = fcmp olt float [[X3]], 4.200000e+01
112; CHECK-NEXT:    [[TMP1:%.*]] = fcmp ogt <4 x float> [[X]], splat (float 1.000000e+00)
113; CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP1]])
114; CHECK-NEXT:    [[OP_RDX:%.*]] = or i1 [[TMP2]], [[CMP3WRONG]]
115; CHECK-NEXT:    [[R:%.*]] = select i1 [[OP_RDX]], float -1.000000e+00, float 1.000000e+00
116; CHECK-NEXT:    ret float [[R]]
117;
118  %x0 = extractelement <4 x float> %x, i32 0
119  %x1 = extractelement <4 x float> %x, i32 1
120  %x2 = extractelement <4 x float> %x, i32 2
121  %x3 = extractelement <4 x float> %x, i32 3
; Odd one out: different predicate (olt) and constant (42.0) from the rest.
122  %cmp3wrong = fcmp olt float %x3, 42.0
123  %cmp0 = fcmp ogt float %x0, 1.0
124  %cmp1 = fcmp ogt float %x1, 1.0
125  %cmp2 = fcmp ogt float %x2, 1.0
126  %cmp3 = fcmp ogt float %x3, 1.0
127  %or03 = or i1 %cmp0, %cmp3
128  %or031 = or i1 %or03, %cmp1
129  %or0312 = or i1 %or031, %cmp2
; %cmp3wrong joins the chain last, after all the uniform compares.
130  %or03123 = or i1 %or0312, %cmp3wrong
131  %r = select i1 %or03123, float -1.0, float 1.0
132  ret float %r
133}
134
135define i32 @merge_anyof_v4i32_wrong_middle(<4 x i32> %x) {
; Integer variant: the mismatched compare (%cmp3wrong, 'slt 42') sits in the
; MIDDLE of the or-chain. Expected output is the same shape as the fp tests:
; scalar %cmp3wrong or'd with a v4i1 or-reduction of the four 'sgt 1' compares.
136; CHECK-LABEL: @merge_anyof_v4i32_wrong_middle(
137; CHECK-NEXT:    [[X3:%.*]] = extractelement <4 x i32> [[X:%.*]], i32 3
138; CHECK-NEXT:    [[CMP3WRONG:%.*]] = icmp slt i32 [[X3]], 42
139; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt <4 x i32> [[X]], splat (i32 1)
140; CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP1]])
141; CHECK-NEXT:    [[OP_RDX:%.*]] = or i1 [[TMP2]], [[CMP3WRONG]]
142; CHECK-NEXT:    [[R:%.*]] = select i1 [[OP_RDX]], i32 -1, i32 1
143; CHECK-NEXT:    ret i32 [[R]]
144;
145  %x0 = extractelement <4 x i32> %x, i32 0
146  %x1 = extractelement <4 x i32> %x, i32 1
147  %x2 = extractelement <4 x i32> %x, i32 2
148  %x3 = extractelement <4 x i32> %x, i32 3
; Odd one out: different predicate (slt) and constant (42) from the rest.
149  %cmp3wrong = icmp slt i32 %x3, 42
150  %cmp0 = icmp sgt i32 %x0, 1
151  %cmp1 = icmp sgt i32 %x1, 1
152  %cmp2 = icmp sgt i32 %x2, 1
153  %cmp3 = icmp sgt i32 %x3, 1
154  %or03 = or i1 %cmp0, %cmp3
; %cmp3wrong is or'd in second — mid-chain, between the uniform compares.
155  %or033 = or i1 %or03, %cmp3wrong
156  %or0332 = or i1 %or033, %cmp2
157  %or03321 = or i1 %or0332, %cmp1
158  %r = select i1 %or03321, i32 -1, i32 1
159  ret i32 %r
160}
161
162; Operand/predicate swapping allows forming a reduction, but the
163; ideal reduction groups all of the original 'sgt' ops together.
164
165define i32 @merge_anyof_v4i32_wrong_middle_better_rdx(<4 x i32> %x) {
; Variable-operand variant: %cmp3wrong is 'slt %x3, %y3', which is the
; operand/predicate-swapped form of 'sgt %y3, %x3', so it could be folded into
; a reduction either way. The CHECK lines pin the preferred grouping: keep the
; four original 'sgt' compares together as the v4i1 reduction and peel
; %cmp3wrong off as the scalar tail (see the comment above this function).
166; CHECK-LABEL: @merge_anyof_v4i32_wrong_middle_better_rdx(
167; CHECK-NEXT:    [[X3:%.*]] = extractelement <4 x i32> [[X:%.*]], i32 3
168; CHECK-NEXT:    [[Y3:%.*]] = extractelement <4 x i32> [[Y:%.*]], i32 3
169; CHECK-NEXT:    [[CMP3WRONG:%.*]] = icmp slt i32 [[X3]], [[Y3]]
170; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt <4 x i32> [[X]], [[Y]]
171; CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP1]])
172; CHECK-NEXT:    [[OP_RDX:%.*]] = or i1 [[TMP2]], [[CMP3WRONG]]
173; CHECK-NEXT:    [[R:%.*]] = select i1 [[OP_RDX]], i32 -1, i32 1
174; CHECK-NEXT:    ret i32 [[R]]
175;
176  %x0 = extractelement <4 x i32> %x, i32 0
177  %x1 = extractelement <4 x i32> %x, i32 1
178  %x2 = extractelement <4 x i32> %x, i32 2
179  %x3 = extractelement <4 x i32> %x, i32 3
180  %y0 = extractelement <4 x i32> %y, i32 0
181  %y1 = extractelement <4 x i32> %y, i32 1
182  %y2 = extractelement <4 x i32> %y, i32 2
183  %y3 = extractelement <4 x i32> %y, i32 3
; Same lanes as %cmp3 but with swapped predicate direction.
184  %cmp3wrong = icmp slt i32 %x3, %y3
185  %cmp0 = icmp sgt i32 %x0, %y0
186  %cmp1 = icmp sgt i32 %x1, %y1
187  %cmp2 = icmp sgt i32 %x2, %y2
188  %cmp3 = icmp sgt i32 %x3, %y3
189  %or03 = or i1 %cmp0, %cmp3
; %cmp3wrong is or'd in mid-chain, as in merge_anyof_v4i32_wrong_middle.
190  %or033 = or i1 %or03, %cmp3wrong
191  %or0332 = or i1 %or033, %cmp2
192  %or03321 = or i1 %or0332, %cmp1
193  %r = select i1 %or03321, i32 -1, i32 1
194  ret i32 %r
195}
196