; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: %if x86-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse4.2 | FileCheck %s %}
; RUN: %if aarch64-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu -mattr=+neon | FileCheck %s %}

; PR28474

;void foo();
;
;int test(unsigned int *p) {
;  int sum = 0;
;  #pragma nounroll
;  for (int y = 0; y < 2; y++) {
;    // Inner loop gets unrolled
;    for (int x = 0; x < 8; x++) {
;      sum += p[x] * 42;
;    }
;    // Dummy call to keep outer loop alive
;    foo();
;  }
;  return sum;
;}
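;
; The CHECK lines below expect the eight unrolled scalar load/mul/add chains to
; be folded into a single <8 x i32> load from %p, a vector multiply by a splat
; of 42, and one @llvm.vector.reduce.add.v8i32 call, with the loop-carried %sum
; added to the reduction result.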

define i32 @test(ptr nocapture readonly %p) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = mul <8 x i32> [[TMP1]], splat (i32 42)
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP3]], [[SUM]]
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret i32 [[OP_RDX]]
;
entry:
  %arrayidx.1 = getelementptr inbounds i32, ptr %p, i64 1
  %arrayidx.2 = getelementptr inbounds i32, ptr %p, i64 2
  %arrayidx.3 = getelementptr inbounds i32, ptr %p, i64 3
  %arrayidx.4 = getelementptr inbounds i32, ptr %p, i64 4
  %arrayidx.5 = getelementptr inbounds i32, ptr %p, i64 5
  %arrayidx.6 = getelementptr inbounds i32, ptr %p, i64 6
  %arrayidx.7 = getelementptr inbounds i32, ptr %p, i64 7
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
  %tmp = load i32, ptr %p, align 4
  %mul = mul i32 %tmp, 42
  %add = add i32 %mul, %sum
  %tmp5 = load i32, ptr %arrayidx.1, align 4
  %mul.1 = mul i32 %tmp5, 42
  %add.1 = add i32 %mul.1, %add
  %tmp6 = load i32, ptr %arrayidx.2, align 4
  %mul.2 = mul i32 %tmp6, 42
  %add.2 = add i32 %mul.2, %add.1
  %tmp7 = load i32, ptr %arrayidx.3, align 4
  %mul.3 = mul i32 %tmp7, 42
  %add.3 = add i32 %mul.3, %add.2
  %tmp8 = load i32, ptr %arrayidx.4, align 4
  %mul.4 = mul i32 %tmp8, 42
  %add.4 = add i32 %mul.4, %add.3
  %tmp9 = load i32, ptr %arrayidx.5, align 4
  %mul.5 = mul i32 %tmp9, 42
  %add.5 = add i32 %mul.5, %add.4
  %tmp10 = load i32, ptr %arrayidx.6, align 4
  %mul.6 = mul i32 %tmp10, 42
  %add.6 = add i32 %mul.6, %add.5
  %tmp11 = load i32, ptr %arrayidx.7, align 4
  %mul.7 = mul i32 %tmp11, 42
  %add.7 = add i32 %mul.7, %add.6
  br i1 true, label %for.end, label %for.body

for.end:
  ret i32 %add.7
}

;void foo();
;
;int test2(unsigned int *p, unsigned int *q) {
;  int sum = 0;
;  #pragma nounroll
;  for (int y = 0; y < 2; y++) {
;    // Inner loop gets unrolled
;    for (int x = 0; x < 8; x++) {
;      sum += p[x] * q[x];
;    }
;    // Dummy call to keep outer loop alive
;    foo();
;  }
;  return sum;
;}
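;
; As in @test, the CHECK lines below expect a single add reduction, but with
; both groups of eight consecutive loads (from %p and %q) vectorized and
; multiplied element-wise before the reduction.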

define i32 @test2(ptr nocapture readonly %p, ptr nocapture readonly %q) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = mul <8 x i32> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP5]], [[SUM]]
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret i32 [[OP_RDX]]
;
entry:
  %arrayidx.p.1 = getelementptr inbounds i32, ptr %p, i64 1
  %arrayidx.p.2 = getelementptr inbounds i32, ptr %p, i64 2
  %arrayidx.p.3 = getelementptr inbounds i32, ptr %p, i64 3
  %arrayidx.p.4 = getelementptr inbounds i32, ptr %p, i64 4
  %arrayidx.p.5 = getelementptr inbounds i32, ptr %p, i64 5
  %arrayidx.p.6 = getelementptr inbounds i32, ptr %p, i64 6
  %arrayidx.p.7 = getelementptr inbounds i32, ptr %p, i64 7

  %arrayidx.q.1 = getelementptr inbounds i32, ptr %q, i64 1
  %arrayidx.q.2 = getelementptr inbounds i32, ptr %q, i64 2
  %arrayidx.q.3 = getelementptr inbounds i32, ptr %q, i64 3
  %arrayidx.q.4 = getelementptr inbounds i32, ptr %q, i64 4
  %arrayidx.q.5 = getelementptr inbounds i32, ptr %q, i64 5
  %arrayidx.q.6 = getelementptr inbounds i32, ptr %q, i64 6
  %arrayidx.q.7 = getelementptr inbounds i32, ptr %q, i64 7
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
  %tmpp = load i32, ptr %p, align 4
  %tmpq = load i32, ptr %q, align 4
  %mul = mul i32 %tmpp, %tmpq
  %add = add i32 %mul, %sum
  %tmp5p = load i32, ptr %arrayidx.p.1, align 4
  %tmp5q = load i32, ptr %arrayidx.q.1, align 4
  %mul.1 = mul i32 %tmp5p, %tmp5q
  %add.1 = add i32 %mul.1, %add
  %tmp6p = load i32, ptr %arrayidx.p.2, align 4
  %tmp6q = load i32, ptr %arrayidx.q.2, align 4
  %mul.2 = mul i32 %tmp6p, %tmp6q
  %add.2 = add i32 %mul.2, %add.1
  %tmp7p = load i32, ptr %arrayidx.p.3, align 4
  %tmp7q = load i32, ptr %arrayidx.q.3, align 4
  %mul.3 = mul i32 %tmp7p, %tmp7q
  %add.3 = add i32 %mul.3, %add.2
  %tmp8p = load i32, ptr %arrayidx.p.4, align 4
  %tmp8q = load i32, ptr %arrayidx.q.4, align 4
  %mul.4 = mul i32 %tmp8p, %tmp8q
  %add.4 = add i32 %mul.4, %add.3
  %tmp9p = load i32, ptr %arrayidx.p.5, align 4
  %tmp9q = load i32, ptr %arrayidx.q.5, align 4
  %mul.5 = mul i32 %tmp9p, %tmp9q
  %add.5 = add i32 %mul.5, %add.4
  %tmp10p = load i32, ptr %arrayidx.p.6, align 4
  %tmp10q = load i32, ptr %arrayidx.q.6, align 4
  %mul.6 = mul i32 %tmp10p, %tmp10q
  %add.6 = add i32 %mul.6, %add.5
  %tmp11p = load i32, ptr %arrayidx.p.7, align 4
  %tmp11q = load i32, ptr %arrayidx.q.7, align 4
  %mul.7 = mul i32 %tmp11p, %tmp11q
  %add.7 = add i32 %mul.7, %add.6
  br i1 true, label %for.end, label %for.body

for.end:
  ret i32 %add.7
}

;void foo();
;
;int test3(unsigned int *p, unsigned int *q) {
;  int sum = 0;
;  #pragma nounroll
;  for (int y = 0; y < 2; y++) {
;    // Inner loop gets unrolled
;    for (int x = 0; x < 8; x++) {
;      sum += p[x] * q[7-x];
;    }
;    // Dummy call to keep outer loop alive
;    foo();
;  }
;  return sum;
;}
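;
; The %q elements are read in reverse order (q[7-x]), so the CHECK lines below
; expect the vectorized %q load to go through a lane-reversing shufflevector
; before the element-wise multiply and the add reduction.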

define i32 @test3(ptr nocapture readonly %p, ptr nocapture readonly %q) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP3]], <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    [[TMP4:%.*]] = mul <8 x i32> [[TMP1]], [[SHUFFLE]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP5]], [[SUM]]
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret i32 [[OP_RDX]]
;
entry:
  %arrayidx.p.1 = getelementptr inbounds i32, ptr %p, i64 1
  %arrayidx.p.2 = getelementptr inbounds i32, ptr %p, i64 2
  %arrayidx.p.3 = getelementptr inbounds i32, ptr %p, i64 3
  %arrayidx.p.4 = getelementptr inbounds i32, ptr %p, i64 4
  %arrayidx.p.5 = getelementptr inbounds i32, ptr %p, i64 5
  %arrayidx.p.6 = getelementptr inbounds i32, ptr %p, i64 6
  %arrayidx.p.7 = getelementptr inbounds i32, ptr %p, i64 7

  %arrayidx.q.1 = getelementptr inbounds i32, ptr %q, i64 1
  %arrayidx.q.2 = getelementptr inbounds i32, ptr %q, i64 2
  %arrayidx.q.3 = getelementptr inbounds i32, ptr %q, i64 3
  %arrayidx.q.4 = getelementptr inbounds i32, ptr %q, i64 4
  %arrayidx.q.5 = getelementptr inbounds i32, ptr %q, i64 5
  %arrayidx.q.6 = getelementptr inbounds i32, ptr %q, i64 6
  %arrayidx.q.7 = getelementptr inbounds i32, ptr %q, i64 7
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
  %tmpp = load i32, ptr %p, align 4
  %tmpq = load i32, ptr %arrayidx.q.7, align 4
  %mul = mul i32 %tmpp, %tmpq
  %add = add i32 %mul, %sum
  %tmp5p = load i32, ptr %arrayidx.p.1, align 4
  %tmp5q = load i32, ptr %arrayidx.q.6, align 4
  %mul.1 = mul i32 %tmp5p, %tmp5q
  %add.1 = add i32 %mul.1, %add
  %tmp6p = load i32, ptr %arrayidx.p.2, align 4
  %tmp6q = load i32, ptr %arrayidx.q.5, align 4
  %mul.2 = mul i32 %tmp6p, %tmp6q
  %add.2 = add i32 %mul.2, %add.1
  %tmp7p = load i32, ptr %arrayidx.p.3, align 4
  %tmp7q = load i32, ptr %arrayidx.q.4, align 4
  %mul.3 = mul i32 %tmp7p, %tmp7q
  %add.3 = add i32 %mul.3, %add.2
  %tmp8p = load i32, ptr %arrayidx.p.4, align 4
  %tmp8q = load i32, ptr %arrayidx.q.3, align 4
  %mul.4 = mul i32 %tmp8p, %tmp8q
  %add.4 = add i32 %mul.4, %add.3
  %tmp9p = load i32, ptr %arrayidx.p.5, align 4
  %tmp9q = load i32, ptr %arrayidx.q.2, align 4
  %mul.5 = mul i32 %tmp9p, %tmp9q
  %add.5 = add i32 %mul.5, %add.4
  %tmp10p = load i32, ptr %arrayidx.p.6, align 4
  %tmp10q = load i32, ptr %arrayidx.q.1, align 4
  %mul.6 = mul i32 %tmp10p, %tmp10q
  %add.6 = add i32 %mul.6, %add.5
  %tmp11p = load i32, ptr %arrayidx.p.7, align 4
  %tmp11q = load i32, ptr %q, align 4
  %mul.7 = mul i32 %tmp11p, %tmp11q
  %add.7 = add i32 %mul.7, %add.6
  br i1 true, label %for.end, label %for.body

for.end:
  ret i32 %add.7
}