; REQUIRES: asserts
; RUN: opt < %s -passes=loop-vectorize -debug -disable-output -force-ordered-reductions=true -hints-allow-reordering=false \
; RUN:   -force-vector-width=4 -force-vector-interleave=1 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VF4
; RUN: opt < %s -passes=loop-vectorize -debug -disable-output -force-ordered-reductions=true -hints-allow-reordering=false \
; RUN:   -force-vector-width=8 -force-vector-interleave=1 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VF8

target triple = "aarch64-unknown-linux-gnu"
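
; This test checks the cost-model debug output of the loop vectorizer for
; strict (in-order) floating-point reductions: -force-ordered-reductions=true
; and -hints-allow-reordering=false keep the reduction in program order, and
; the two RUN lines compare the reported costs at VF 4 and VF 8.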

; CHECK-VF4: Found an estimated cost of 14 for VF 4 For instruction:   %add = fadd float %0, %sum.07
; CHECK-VF8: Found an estimated cost of 28 for VF 8 For instruction:   %add = fadd float %0, %sum.07
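; A rough C equivalent of the loop below, for reference (the IR loop is
; bottom-tested, so it assumes n >= 1):
;   float sum = 0.0f;
;   for (long i = 0; i < n; ++i)
;     sum += a[i];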
define float @fadd_strict32(ptr noalias nocapture readonly %a, i64 %n) {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %add = fadd float %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret float %add
}

; CHECK-VF4: Found an estimated cost of 12 for VF 4 For instruction:   %add = fadd double %0, %sum.07
; CHECK-VF8: Found an estimated cost of 24 for VF 8 For instruction:   %add = fadd double %0, %sum.07
define double @fadd_strict64(ptr noalias nocapture readonly %a, i64 %n) {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi double [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds double, ptr %a, i64 %iv
  %0 = load double, ptr %arrayidx, align 4
  %add = fadd double %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret double %add
}
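
; The tests below check the same in-order reduction when it is formed through
; the llvm.fmuladd intrinsic rather than a plain fadd.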

; CHECK-VF4: Found an estimated cost of 16 for VF 4 For instruction:   %muladd = tail call float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
; CHECK-VF8: Found an estimated cost of 32 for VF 8 For instruction:   %muladd = tail call float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
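; A rough C equivalent, again assuming n >= 1 (the fused multiply-add maps to
; llvm.fmuladd under FP contraction):
;   float sum = 0.0f;
;   for (long i = 0; i < n; ++i)
;     sum += a[i] * b[i];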
define float @fmuladd_strict32(ptr %a, ptr %b, i64 %n) {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, ptr %b, i64 %iv
  %1 = load float, ptr %arrayidx2, align 4
  %muladd = tail call float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret float %muladd
}

declare float @llvm.fmuladd.f32(float, float, float)

; CHECK-VF4: Found an estimated cost of 16 for VF 4 For instruction:   %muladd = tail call double @llvm.fmuladd.f64(double %0, double %1, double %sum.07)
; CHECK-VF8: Found an estimated cost of 32 for VF 8 For instruction:   %muladd = tail call double @llvm.fmuladd.f64(double %0, double %1, double %sum.07)
define double @fmuladd_strict64(ptr %a, ptr %b, i64 %n) {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi double [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
  %arrayidx = getelementptr inbounds double, ptr %a, i64 %iv
  %0 = load double, ptr %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds double, ptr %b, i64 %iv
  %1 = load double, ptr %arrayidx2, align 4
  %muladd = tail call double @llvm.fmuladd.f64(double %0, double %1, double %sum.07)
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret double %muladd
}

declare double @llvm.fmuladd.f64(double, double, double)