; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize -mtriple=x86_64-apple-macosx -S -mcpu=corei7-avx -enable-interleaved-mem-accesses=false < %s | FileCheck %s
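; Interleaved-access formation is disabled so that the stride-3 loads from
; @src_data below are (presumably) costed as gathers rather than as an
; interleave group.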
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

@kernel = global [512 x float] zeroinitializer, align 16
@kernel2 = global [512 x float] zeroinitializer, align 16
@kernel3 = global [512 x float] zeroinitializer, align 16
@kernel4 = global [512 x float] zeroinitializer, align 16
@src_data = global [1536 x float] zeroinitializer, align 16
@r_ = global i8 0, align 1
@g_ = global i8 0, align 1
@b_ = global i8 0, align 1

; We don't want to vectorize most loops containing gathers because they are
; expensive. This function represents a point where vectorization starts to
; become beneficial.
; Make sure we are conservative and don't vectorize it.
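;
; Roughly, the IR corresponds to a loop like the following C sketch (a hand
; reconstruction for readability, not the verified source; with fast math the
; hoisted kernel product is equivalent to the per-channel products in the IR):
;
;   void test(size_t size, size_t offset) {
;     float r = 0.0f, g = 0.0f, b = 0.0f;
;     for (size_t v = 0; v < size; ++v) {
;       float k = kernel[v] * kernel2[v] * kernel3[v] * kernel4[v];
;       r += src_data[(v + offset) * 3 + 0] * k;
;       g += src_data[(v + offset) * 3 + 1] * k;
;       b += src_data[(v + offset) * 3 + 2] * k;
;     }
;     r_ = (unsigned char)r;
;     g_ = (unsigned char)g;
;     b_ = (unsigned char)b;
;   }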

define void @_Z4testmm(i64 %size, i64 %offset) {
; CHECK-LABEL: @_Z4testmm(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP53:%.*]] = icmp eq i64 [[SIZE:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP53]], label [[FOR_END:%.*]], label [[FOR_BODY_LR_PH:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[R_057:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD10:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[G_056:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD20:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[V_055:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[B_054:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD30:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[V_055]], [[OFFSET:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[ADD]], 3
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1536 x float], ptr @src_data, i64 0, i64 [[MUL]]
; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [512 x float], ptr @kernel, i64 0, i64 [[V_055]]
; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[MUL3:%.*]] = fmul fast float [[TMP0]], [[TMP1]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [512 x float], ptr @kernel2, i64 0, i64 [[V_055]]
; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT:    [[MUL5:%.*]] = fmul fast float [[MUL3]], [[TMP2]]
; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [512 x float], ptr @kernel3, i64 0, i64 [[V_055]]
; CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT:    [[MUL7:%.*]] = fmul fast float [[MUL5]], [[TMP3]]
; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [512 x float], ptr @kernel4, i64 0, i64 [[V_055]]
; CHECK-NEXT:    [[TMP4:%.*]] = load float, ptr [[ARRAYIDX8]], align 4
; CHECK-NEXT:    [[MUL9:%.*]] = fmul fast float [[MUL7]], [[TMP4]]
; CHECK-NEXT:    [[ADD10]] = fadd fast float [[R_057]], [[MUL9]]
; CHECK-NEXT:    [[ARRAYIDX_SUM:%.*]] = add i64 [[MUL]], 1
; CHECK-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds [1536 x float], ptr @src_data, i64 0, i64 [[ARRAYIDX_SUM]]
; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[ARRAYIDX11]], align 4
; CHECK-NEXT:    [[MUL13:%.*]] = fmul fast float [[TMP1]], [[TMP5]]
; CHECK-NEXT:    [[MUL15:%.*]] = fmul fast float [[TMP2]], [[MUL13]]
; CHECK-NEXT:    [[MUL17:%.*]] = fmul fast float [[TMP3]], [[MUL15]]
; CHECK-NEXT:    [[MUL19:%.*]] = fmul fast float [[TMP4]], [[MUL17]]
; CHECK-NEXT:    [[ADD20]] = fadd fast float [[G_056]], [[MUL19]]
; CHECK-NEXT:    [[ARRAYIDX_SUM52:%.*]] = add i64 [[MUL]], 2
; CHECK-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds [1536 x float], ptr @src_data, i64 0, i64 [[ARRAYIDX_SUM52]]
; CHECK-NEXT:    [[TMP6:%.*]] = load float, ptr [[ARRAYIDX21]], align 4
; CHECK-NEXT:    [[MUL23:%.*]] = fmul fast float [[TMP1]], [[TMP6]]
; CHECK-NEXT:    [[MUL25:%.*]] = fmul fast float [[TMP2]], [[MUL23]]
; CHECK-NEXT:    [[MUL27:%.*]] = fmul fast float [[TMP3]], [[MUL25]]
; CHECK-NEXT:    [[MUL29:%.*]] = fmul fast float [[TMP4]], [[MUL27]]
; CHECK-NEXT:    [[ADD30]] = fadd fast float [[B_054]], [[MUL29]]
; CHECK-NEXT:    [[INC]] = add i64 [[V_055]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INC]], [[SIZE]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[ADD30_LCSSA:%.*]] = phi float [ [[ADD30]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ADD20_LCSSA:%.*]] = phi float [ [[ADD20]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ADD10_LCSSA:%.*]] = phi float [ [[ADD10]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[PHITMP:%.*]] = fptoui float [[ADD10_LCSSA]] to i8
; CHECK-NEXT:    [[PHITMP60:%.*]] = fptoui float [[ADD20_LCSSA]] to i8
; CHECK-NEXT:    [[PHITMP61:%.*]] = fptoui float [[ADD30_LCSSA]] to i8
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[R_0_LCSSA:%.*]] = phi i8 [ [[PHITMP]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[G_0_LCSSA:%.*]] = phi i8 [ [[PHITMP60]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    [[B_0_LCSSA:%.*]] = phi i8 [ [[PHITMP61]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    store i8 [[R_0_LCSSA]], ptr @r_, align 1
; CHECK-NEXT:    store i8 [[G_0_LCSSA]], ptr @g_, align 1
; CHECK-NEXT:    store i8 [[B_0_LCSSA]], ptr @b_, align 1
; CHECK-NEXT:    ret void
;
entry:
  %cmp53 = icmp eq i64 %size, 0
  br i1 %cmp53, label %for.end, label %for.body.lr.ph

for.body.lr.ph:
  br label %for.body

for.body:
  %r.057 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add10, %for.body ]
  %g.056 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add20, %for.body ]
  %v.055 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %b.054 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add30, %for.body ]
  %add = add i64 %v.055, %offset
  %mul = mul i64 %add, 3
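  ; The loads of @src_data at %mul, %mul + 1, and %mul + 2 below are the
  ; stride-3 accesses that would become gathers if this loop were vectorized.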
  %arrayidx = getelementptr inbounds [1536 x float], ptr @src_data, i64 0, i64 %mul
  %0 = load float, ptr %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds [512 x float], ptr @kernel, i64 0, i64 %v.055
  %1 = load float, ptr %arrayidx2, align 4
  %mul3 = fmul fast float %0, %1
  %arrayidx4 = getelementptr inbounds [512 x float], ptr @kernel2, i64 0, i64 %v.055
  %2 = load float, ptr %arrayidx4, align 4
  %mul5 = fmul fast float %mul3, %2
  %arrayidx6 = getelementptr inbounds [512 x float], ptr @kernel3, i64 0, i64 %v.055
  %3 = load float, ptr %arrayidx6, align 4
  %mul7 = fmul fast float %mul5, %3
  %arrayidx8 = getelementptr inbounds [512 x float], ptr @kernel4, i64 0, i64 %v.055
  %4 = load float, ptr %arrayidx8, align 4
  %mul9 = fmul fast float %mul7, %4
  %add10 = fadd fast float %r.057, %mul9
  %arrayidx.sum = add i64 %mul, 1
  %arrayidx11 = getelementptr inbounds [1536 x float], ptr @src_data, i64 0, i64 %arrayidx.sum
  %5 = load float, ptr %arrayidx11, align 4
  %mul13 = fmul fast float %1, %5
  %mul15 = fmul fast float %2, %mul13
  %mul17 = fmul fast float %3, %mul15
  %mul19 = fmul fast float %4, %mul17
  %add20 = fadd fast float %g.056, %mul19
  %arrayidx.sum52 = add i64 %mul, 2
  %arrayidx21 = getelementptr inbounds [1536 x float], ptr @src_data, i64 0, i64 %arrayidx.sum52
  %6 = load float, ptr %arrayidx21, align 4
  %mul23 = fmul fast float %1, %6
  %mul25 = fmul fast float %2, %mul23
  %mul27 = fmul fast float %3, %mul25
  %mul29 = fmul fast float %4, %mul27
  %add30 = fadd fast float %b.054, %mul29
  %inc = add i64 %v.055, 1
  %exitcond = icmp ne i64 %inc, %size
  br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:
  %add30.lcssa = phi float [ %add30, %for.body ]
  %add20.lcssa = phi float [ %add20, %for.body ]
  %add10.lcssa = phi float [ %add10, %for.body ]
  %phitmp = fptoui float %add10.lcssa to i8
  %phitmp60 = fptoui float %add20.lcssa to i8
  %phitmp61 = fptoui float %add30.lcssa to i8
  br label %for.end

for.end:
  %r.0.lcssa = phi i8 [ %phitmp, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  %g.0.lcssa = phi i8 [ %phitmp60, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  %b.0.lcssa = phi i8 [ %phitmp61, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  store i8 %r.0.lcssa, ptr @r_, align 1
  store i8 %g.0.lcssa, ptr @g_, align 1
  store i8 %b.0.lcssa, ptr @b_, align 1
  ret void
}