; xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/SystemZ/vec-elt-insertion.ll (revision 0ad6be1927f89cef09aa5d0fb244873f687997c9)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -mtriple=s390x-unknown-linux -mcpu=z16 -S -passes=slp-vectorizer \
; RUN:   -pass-remarks-output=%t | FileCheck %s
; RUN: cat %t | FileCheck -check-prefix=REMARK %s
;
; Test functions that (at least currently) only get vectorized if the
; insertion cost for an element load is counted as free.

declare double @llvm.fmuladd.f64(double, double, double)

; This function needs the free element load to be recognized in SLP
; getGatherCost().
define void @fun0(ptr %0, double %1) {
; CHECK-LABEL: define void @fun0(
; CHECK-SAME: ptr [[TMP0:%.*]], double [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:    [[TMP3:%.*]] = load double, ptr [[TMP0]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> poison, double [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> [[TMP4]], double [[TMP3]], i32 1
; CHECK-NEXT:    [[TMP6:%.*]] = fmul <2 x double> [[TMP5]], splat (double 2.000000e+00)
; CHECK-NEXT:    [[TMP7:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP6]], <2 x double> [[TMP6]], <2 x double> zeroinitializer)
; CHECK-NEXT:    [[TMP8:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP6]], <2 x double> [[TMP6]], <2 x double> [[TMP7]])
; CHECK-NEXT:    [[TMP9:%.*]] = call <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP8]])
; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <2 x double> [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <2 x double> [[TMP9]], i32 1
; CHECK-NEXT:    [[TMP12:%.*]] = fadd double [[TMP10]], [[TMP11]]
; CHECK-NEXT:    store double [[TMP12]], ptr [[TMP0]], align 8
; CHECK-NEXT:    ret void
;
; REMARK-LABEL: Function: fun0
; REMARK: Args:
; REMARK-NEXT: - String:          'SLP vectorized with cost '
; REMARK-NEXT: - Cost:            '-1'

; Two parallel fmul -> fmuladd -> fmuladd -> sqrt chains, one fed by the
; argument %1 and one by a load from %0; the expected output above packs
; them into <2 x double> (the element load lands in lane 1 for free).
  %3 = fmul double %1, 2.000000e+00
  %4 = tail call double @llvm.fmuladd.f64(double %3, double %3, double 0.000000e+00)
  %5 = tail call double @llvm.fmuladd.f64(double %3, double %3, double %4)
  %sqrt1 = tail call double @llvm.sqrt.f64(double %5)
  %6 = load double, ptr %0, align 8
  %7 = fmul double %6, 2.000000e+00
  %8 = tail call double @llvm.fmuladd.f64(double %7, double %7, double 0.000000e+00)
  %9 = tail call double @llvm.fmuladd.f64(double %7, double %7, double %8)
  %sqrt = tail call double @llvm.sqrt.f64(double %9)
  %10 = fadd double %sqrt1, %sqrt
  store double %10, ptr %0, align 8
  ret void
}

; This function needs the element-load to be recognized in SystemZ
; getVectorInstrCost().
define void @fun1(double %0) {
; CHECK-LABEL: define void @fun1(
; CHECK-SAME: double [[TMP0:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> <double 0.000000e+00, double poison>, double [[TMP0]], i32 1
; CHECK-NEXT:    br label %[[BB3:.*]]
; CHECK:       [[BB3]]:
; CHECK-NEXT:    [[TMP4:%.*]] = phi <2 x double> [ <double poison, double undef>, [[TMP1:%.*]] ], [ poison, %[[BB3]] ]
; CHECK-NEXT:    [[TMP5:%.*]] = phi <2 x double> [ zeroinitializer, [[TMP1]] ], [ poison, %[[BB3]] ]
; CHECK-NEXT:    [[TMP6:%.*]] = phi <2 x double> [ zeroinitializer, [[TMP1]] ], [ [[TMP18:%.*]], %[[BB3]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = fsub <2 x double> zeroinitializer, [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = fsub <2 x double> zeroinitializer, [[TMP5]]
; CHECK-NEXT:    [[TMP9:%.*]] = fsub <2 x double> zeroinitializer, [[TMP4]]
; CHECK-NEXT:    [[TMP10:%.*]] = load double, ptr null, align 8
; CHECK-NEXT:    [[TMP11:%.*]] = fmul <2 x double> [[TMP7]], zeroinitializer
; CHECK-NEXT:    [[TMP12:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP8]], <2 x double> [[TMP8]], <2 x double> [[TMP11]])
; CHECK-NEXT:    [[TMP13:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP9]], <2 x double> [[TMP9]], <2 x double> [[TMP12]])
; CHECK-NEXT:    [[TMP14:%.*]] = fcmp olt <2 x double> [[TMP13]], [[TMP2]]
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <2 x i1> [[TMP14]], i32 0
; CHECK-NEXT:    [[TMP16:%.*]] = extractelement <2 x i1> [[TMP14]], i32 1
; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[TMP16]]
; CHECK-NEXT:    [[TMP18]] = insertelement <2 x double> poison, double [[TMP10]], i32 1
; CHECK-NEXT:    br label %[[BB3]]
;
; REMARK-LABEL: Function: fun1
; REMARK: Args:
; REMARK:      - String:          'SLP vectorized with cost '
; REMARK-NEXT: - Cost:            '-1'

; Infinite loop with two scalar lanes of fsub/fmul/fmuladd/fcmp work; the
; loop-carried load from null (%21) is the element load the cost model must
; treat as a free vector-element insertion for vectorization to pay off.
  br label %2

2:
  %3 = phi double [ poison, %1 ], [ poison, %2 ]
  %4 = phi double [ undef, %1 ], [ poison, %2 ]
  %5 = phi double [ 0.000000e+00, %1 ], [ poison, %2 ]
  %6 = phi double [ 0.000000e+00, %1 ], [ poison, %2 ]
  %7 = phi double [ 0.000000e+00, %1 ], [ poison, %2 ]
  %8 = phi double [ 0.000000e+00, %1 ], [ %21, %2 ]
  %9 = fsub double 0.000000e+00, %8
  %10 = fsub double 0.000000e+00, %7
  %11 = fmul double %9, 0.000000e+00
  %12 = fmul double %10, 0.000000e+00
  %13 = fsub double 0.000000e+00, %6
  %14 = fsub double 0.000000e+00, %5
  %15 = tail call double @llvm.fmuladd.f64(double %13, double %13, double %11)
  %16 = tail call double @llvm.fmuladd.f64(double %14, double %14, double %12)
  %17 = fsub double 0.000000e+00, %4
  %18 = fsub double 0.000000e+00, %3
  %19 = tail call double @llvm.fmuladd.f64(double %17, double %17, double %15)
  %20 = tail call double @llvm.fmuladd.f64(double %18, double %18, double %16)
  %21 = load double, ptr null, align 8
  %22 = fcmp olt double %19, %0
  %23 = fcmp olt double %20, 0.000000e+00
  %24 = or i1 %23, %22
  br label %2
}

; This should *not* be vectorized as the insertion into the vector isn't free,
; which is recognized in SystemZTTImpl::getScalarizationOverhead().
define void @fun2(ptr %0, ptr %Dst) {
; CHECK-LABEL: define void @fun2(
; CHECK-SAME: ptr [[TMP0:%.*]], ptr [[DST:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[TMP0]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[TMP2]], 0
; CHECK-NEXT:    br i1 [[TMP3]], label %[[BB4:.*]], label %[[BB5:.*]]
; CHECK:       [[BB4]]:
; CHECK-NEXT:    ret void
; CHECK:       [[BB5]]:
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 24
; CHECK-NEXT:    store i64 [[TMP2]], ptr [[TMP6]], align 8
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], i64 16
; CHECK-NEXT:    store i64 0, ptr [[TMP7]], align 8
; CHECK-NEXT:    br label %[[BB4]]
;
; REMARK-NOT: Function: fun2

; The two i64 stores must stay scalar (expected output above is unchanged
; and the function must not appear in the remarks file).
  %3 = load i64, ptr %0, align 8
  %4 = icmp eq i64 %3, 0
  br i1 %4, label %5, label %6

5:
  ret void

6:
  %7 = getelementptr i8, ptr %Dst, i64 24
  store i64 %3, ptr %7, align 8
  %8 = getelementptr i8, ptr %Dst, i64 16
  store i64 0, ptr %8, align 8
  br label %5
}

; This should *not* be vectorized as the load is immediately stored, in which
; case MVC is preferred.
define void @fun3(ptr %0)  {
; CHECK-LABEL: define void @fun3(
; CHECK-SAME: ptr [[TMP0:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr inttoptr (i64 568 to ptr), align 8
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 40
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 48
; CHECK-NEXT:    br label %[[BB5:.*]]
; CHECK:       [[BB5]]:
; CHECK-NEXT:    store ptr null, ptr [[TMP3]], align 8, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT:    [[TMP6:%.*]] = load ptr, ptr inttoptr (i64 64 to ptr), align 8, !tbaa [[TBAA8:![0-9]+]]
; CHECK-NEXT:    store ptr [[TMP6]], ptr [[TMP4]], align 8
; CHECK-NEXT:    [[TMP7:%.*]] = tail call i64 [[TMP0]](ptr noundef poison, i64 noundef poison)
; CHECK-NEXT:    br label %[[BB5]]
;
; The load %6 is stored straight back out through %4, so the expected output
; above keeps the scalar load/store pair instead of vectorizing it.
  %2 = load ptr, ptr inttoptr (i64 568 to ptr), align 8
  %3 = getelementptr inbounds nuw i8, ptr %2, i64 40
  %4 = getelementptr inbounds nuw i8, ptr %2, i64 48
  br label %5

5:
  store ptr null, ptr %3, align 8, !tbaa !1
  %6 = load ptr, ptr inttoptr (i64 64 to ptr), align 8, !tbaa !9
  store ptr %6, ptr %4, align 8
  %7 = tail call i64 %0(ptr noundef poison, i64 noundef poison)
  br label %5
}

; TBAA access metadata referenced by the store (!tbaa !1) and load (!tbaa !9)
; in @fun3.
!1 = !{!2, !7, i64 40}
!2 = !{!"arc", !3, i64 0, !6, i64 8, !7, i64 16, !7, i64 24, !8, i64 32, !7, i64 40, !7, i64 48, !6, i64 56, !6, i64 64}
!3 = !{!"int", !4, i64 0}
!4 = !{!"omnipotent char", !5, i64 0}
!5 = !{!"Simple C/C++ TBAA"}
!6 = !{!"long", !4, i64 0}
!7 = !{!"any pointer", !4, i64 0}
!8 = !{!"short", !4, i64 0}
!9 = !{!10, !7, i64 64}
!10 = !{!"node", !6, i64 0, !3, i64 8, !7, i64 16, !7, i64 24, !7, i64 32, !7, i64 40, !7, i64 48, !7, i64 56, !7, i64 64, !7, i64 72, !6, i64 80, !6, i64 88, !3, i64 96, !3, i64 100}
;.
; CHECK: [[TBAA0]] = !{[[META1:![0-9]+]], [[META6:![0-9]+]], i64 40}
; CHECK: [[META1]] = !{!"arc", [[META2:![0-9]+]], i64 0, [[META5:![0-9]+]], i64 8, [[META6]], i64 16, [[META6]], i64 24, [[META7:![0-9]+]], i64 32, [[META6]], i64 40, [[META6]], i64 48, [[META5]], i64 56, [[META5]], i64 64}
; CHECK: [[META2]] = !{!"int", [[META3:![0-9]+]], i64 0}
; CHECK: [[META3]] = !{!"omnipotent char", [[META4:![0-9]+]], i64 0}
; CHECK: [[META4]] = !{!"Simple C/C++ TBAA"}
; CHECK: [[META5]] = !{!"long", [[META3]], i64 0}
; CHECK: [[META6]] = !{!"any pointer", [[META3]], i64 0}
; CHECK: [[META7]] = !{!"short", [[META3]], i64 0}
; CHECK: [[TBAA8]] = !{[[META9:![0-9]+]], [[META6]], i64 64}
; CHECK: [[META9]] = !{!"node", [[META5]], i64 0, [[META2]], i64 8, [[META6]], i64 16, [[META6]], i64 24, [[META6]], i64 32, [[META6]], i64 40, [[META6]], i64 48, [[META6]], i64 56, [[META6]], i64 64, [[META6]], i64 72, [[META5]], i64 80, [[META5]], i64 88, [[META2]], i64 96, [[META2]], i64 100}
;.
