; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S < %s -p loop-vectorize -enable-early-exit-vectorization -force-vector-width=4 | FileCheck %s

declare void @init_mem(ptr, i64);

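; Both the early exit taken from inside the loop and the exit taken from the
; latch branch to the same exit block, and the exit phi only has constant
; incoming values.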
define i64 @same_exit_block_phi_of_consts() {
; CHECK-LABEL: define i64 @same_exit_block_phi_of_consts() {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = alloca [1024 x i8], align 1
; CHECK-NEXT:    [[P2:%.*]] = alloca [1024 x i8], align 1
; CHECK-NEXT:    call void @init_mem(ptr [[P1]], i64 1024)
; CHECK-NEXT:    call void @init_mem(ptr [[P2]], i64 1024)
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
; CHECK-NEXT:    [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
; CHECK-NEXT:    [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
; CHECK-NEXT:    [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
; CHECK-NEXT:    [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.split:
; CHECK-NEXT:    br i1 [[TMP7]], label [[LOOP_END:%.*]], label [[MIDDLE_BLOCK:%.*]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
; CHECK-NEXT:    [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
; CHECK-NEXT:    [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
; CHECK-NEXT:    br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
; CHECK:       loop.inc:
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       loop.end:
; CHECK-NEXT:    [[RETVAL:%.*]] = phi i64 [ 0, [[LOOP]] ], [ 1, [[LOOP_INC]] ], [ 1, [[MIDDLE_BLOCK]] ], [ 0, [[MIDDLE_SPLIT]] ]
; CHECK-NEXT:    ret i64 [[RETVAL]]
;
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ 0, %loop ], [ 1, %loop.inc ]
  ret i64 %retval
}

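; As above, but the early exit and the latch exit branch to different exit
; blocks, each of which simply returns a constant.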
define i64 @diff_exit_block_phi_of_consts() {
; CHECK-LABEL: define i64 @diff_exit_block_phi_of_consts() {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = alloca [1024 x i8], align 1
; CHECK-NEXT:    [[P2:%.*]] = alloca [1024 x i8], align 1
; CHECK-NEXT:    call void @init_mem(ptr [[P1]], i64 1024)
; CHECK-NEXT:    call void @init_mem(ptr [[P2]], i64 1024)
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
; CHECK-NEXT:    [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
; CHECK-NEXT:    [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
; CHECK-NEXT:    [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
; CHECK-NEXT:    [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.split:
; CHECK-NEXT:    br i1 [[TMP7]], label [[LOOP_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[LOOP_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
; CHECK-NEXT:    [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
; CHECK-NEXT:    [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
; CHECK-NEXT:    br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
; CHECK:       loop.inc:
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       loop.early.exit:
; CHECK-NEXT:    ret i64 0
; CHECK:       loop.end:
; CHECK-NEXT:    ret i64 1
;
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.early.exit

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.early.exit:
  ret i64 0

loop.end:
  ret i64 1
}


; The form of the induction variables requires SCEV predicates.
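; The narrow i8 induction variable could wrap, so a runtime check is emitted
; in vector.scevcheck before the vector loop is entered.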
define i32 @diff_exit_block_needs_scev_check(i32 %end) {
; CHECK-LABEL: define i32 @diff_exit_block_needs_scev_check(
; CHECK-SAME: i32 [[END:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = alloca [1024 x i32], align 4
; CHECK-NEXT:    [[P2:%.*]] = alloca [1024 x i32], align 4
; CHECK-NEXT:    call void @init_mem(ptr [[P1]], i64 1024)
; CHECK-NEXT:    call void @init_mem(ptr [[P2]], i64 1024)
; CHECK-NEXT:    [[END_CLAMPED:%.*]] = and i32 [[END]], 1023
; CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[END]] to i10
; CHECK-NEXT:    [[TMP1:%.*]] = zext i10 [[TMP0]] to i64
; CHECK-NEXT:    [[UMAX1:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX1]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[END_CLAMPED]], i32 1)
; CHECK-NEXT:    [[TMP2:%.*]] = add nsw i32 [[UMAX]], -1
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT:    [[TMP4:%.*]] = add i8 1, [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i8 [[TMP4]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ugt i32 [[TMP2]], 255
; CHECK-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    br i1 [[TMP7]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[UMAX1]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[UMAX1]], [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[N_VEC]] to i8
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[P2]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4
; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP15:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP14]])
; CHECK-NEXT:    [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[TMP16]]
; CHECK-NEXT:    br i1 [[TMP17]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       middle.split:
; CHECK-NEXT:    br i1 [[TMP15]], label [[FOUND:%.*]], label [[MIDDLE_BLOCK:%.*]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[UMAX1]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i8 [ [[TMP8]], [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL3:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IND:%.*]] = phi i8 [ [[IND_NEXT:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[GEP_IND:%.*]] = phi i64 [ [[GEP_IND_NEXT:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[GEP_IND]]
; CHECK-NEXT:    [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[P2]], i64 [[GEP_IND]]
; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[CMP_EARLY:%.*]] = icmp eq i32 [[TMP18]], [[TMP19]]
; CHECK-NEXT:    br i1 [[CMP_EARLY]], label [[FOUND]], label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[IND_NEXT]] = add i8 [[IND]], 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[IND_NEXT]] to i32
; CHECK-NEXT:    [[GEP_IND_NEXT]] = add i64 [[GEP_IND]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[CONV]], [[END_CLAMPED]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK:       found:
; CHECK-NEXT:    ret i32 1
; CHECK:       exit:
; CHECK-NEXT:    ret i32 0
;
entry:
  %p1 = alloca [1024 x i32]
  %p2 = alloca [1024 x i32]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  %end.clamped = and i32 %end, 1023
  br label %for.body

for.body:
  %ind = phi i8 [ %ind.next, %for.inc ], [ 0, %entry ]
  %gep.ind = phi i64 [ %gep.ind.next, %for.inc ], [ 0, %entry ]
  %arrayidx1 = getelementptr inbounds i32, ptr %p1, i64 %gep.ind
  %0 = load i32, ptr %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, ptr %p2, i64 %gep.ind
  %1 = load i32, ptr %arrayidx2, align 4
  %cmp.early = icmp eq i32 %0, %1
  br i1 %cmp.early, label %found, label %for.inc

for.inc:
  %ind.next = add i8 %ind, 1
  %conv = zext i8 %ind.next to i32
  %gep.ind.next = add i64 %gep.ind, 1
  %cmp = icmp ult i32 %conv, %end.clamped
  br i1 %cmp, label %for.body, label %exit

found:
  ret i32 1

exit:
  ret i32 0
}


declare void @abort()

; This is a variant of an early exit loop where the condition for leaving
; early is loop invariant.
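; Because the condition is invariant, it is evaluated once in vector.ph (via a
; splat and vector.reduce.or) rather than inside the vector loop body.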
define i32 @diff_blocks_invariant_early_exit_cond(ptr %s) {
; CHECK-LABEL: define i32 @diff_blocks_invariant_early_exit_cond(
; CHECK-SAME: ptr [[S:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SVAL:%.*]] = load i32, ptr [[S]], align 4
; CHECK-NEXT:    [[COND:%.*]] = icmp eq i32 [[SVAL]], 0
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[COND]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP0:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP0]])
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 276
; CHECK-NEXT:    [[TMP3:%.*]] = or i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    br i1 [[TMP3]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       middle.split:
; CHECK-NEXT:    br i1 [[TMP1]], label [[EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ 266, [[MIDDLE_BLOCK]] ], [ -10, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IND:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IND_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_INC]], label [[EARLY_EXIT]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[IND_NEXT]] = add nsw i32 [[IND]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IND_NEXT]], 266
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       early.exit:
; CHECK-NEXT:    tail call void @abort()
; CHECK-NEXT:    unreachable
; CHECK:       for.end:
; CHECK-NEXT:    ret i32 0
;
entry:
  %sval = load i32, ptr %s, align 4
  %cond = icmp eq i32 %sval, 0
  br label %for.body

for.body:
  %ind = phi i32 [ -10, %entry ], [ %ind.next, %for.inc ]
  br i1 %cond, label %for.inc, label %early.exit

for.inc:
  %ind.next = add nsw i32 %ind, 1
  %exitcond.not = icmp eq i32 %ind.next, 266
  br i1 %exitcond.not, label %for.end, label %for.body

early.exit:
  tail call void @abort()
  unreachable

for.end:
  ret i32 0
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
;.