; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=loop-vectorize -force-vector-interleave=4 -pass-remarks='loop-vectorize' -disable-output -S 2>&1 | FileCheck %s --check-prefix=CHECK-REMARKS
; RUN: opt < %s -passes=loop-vectorize -force-vector-interleave=4 -S | FileCheck %s

; These tests check that the fold-tail procedure produces correct scalar code
; when loop vectorization only unrolls (interleaves) but does not vectorize.
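; With a trip count of 15 and an interleave count of 4, the unrolled loop runs
; 16 iterations, so the tail is folded: each VF=1 lane gets its own
; 'icmp ule <iv>, 14' mask and its store is emitted under a pred.store.if
; guard, as the CHECK lines below verify.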

; CHECK-REMARKS:      remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NEXT: remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NOT:  remark: {{.*}} vectorized loop

define void @VF1-VPlanExe(ptr %dst) {
; CHECK-LABEL: @VF1-VPlanExe(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i64 [[TMP0]], 14
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i64 [[TMP1]], 14
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ule i64 [[TMP2]], 14
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ule i64 [[TMP3]], 14
; CHECK-NEXT:    br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK:       pred.store.if:
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    store i32 0, ptr [[TMP8]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE]]
; CHECK:       pred.store.continue:
; CHECK-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
; CHECK:       pred.store.if1:
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP1]]
; CHECK-NEXT:    store i32 0, ptr [[TMP9]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; CHECK:       pred.store.continue2:
; CHECK-NEXT:    br i1 [[TMP6]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
; CHECK:       pred.store.if3:
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP2]]
; CHECK-NEXT:    store i32 0, ptr [[TMP10]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE4]]
; CHECK:       pred.store.continue4:
; CHECK-NEXT:    br i1 [[TMP7]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
; CHECK:       pred.store.if5:
; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP3]]
; CHECK-NEXT:    store i32 0, ptr [[TMP11]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE6]]
; CHECK:       pred.store.continue6:
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[DST_PTR:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store i32 0, ptr [[DST_PTR]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %dst.ptr = getelementptr inbounds i32, ptr %dst, i64 %indvars.iv
  store i32 0, ptr %dst.ptr
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 15
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

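; Same tail-folding pattern, but the loop below is controlled by a pointer
; induction variable, so the per-lane masks are derived from a separately
; materialized canonical IV (the VEC_IV values) rather than from the pointer IV.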
define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) {
; CHECK-LABEL: @VF1-VPWidenCanonicalIVRecipeExe(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr inbounds double, ptr [[PTR1:%.*]], i64 15
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr i8, ptr [[PTR1]], i64 128
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE12:%.*]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
; CHECK-NEXT:    [[VEC_IV:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[VEC_IV4:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[VEC_IV5:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[VEC_IV6:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP0:%.*]] = icmp ule i64 [[VEC_IV]], 14
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i64 [[VEC_IV4]], 14
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i64 [[VEC_IV5]], 14
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ule i64 [[VEC_IV6]], 14
; CHECK-NEXT:    br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK:       pred.store.if:
; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP4]]
; CHECK-NEXT:    store double 0.000000e+00, ptr [[NEXT_GEP]], align 8
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE]]
; CHECK:       pred.store.continue:
; CHECK-NEXT:    br i1 [[TMP1]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
; CHECK:       pred.store.if7:
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 8
; CHECK-NEXT:    [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP5]]
; CHECK-NEXT:    store double 0.000000e+00, ptr [[NEXT_GEP1]], align 8
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE8]]
; CHECK:       pred.store.continue8:
; CHECK-NEXT:    br i1 [[TMP2]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]]
; CHECK:       pred.store.if9:
; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 16
; CHECK-NEXT:    [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP6]]
; CHECK-NEXT:    store double 0.000000e+00, ptr [[NEXT_GEP2]], align 8
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE10]]
; CHECK:       pred.store.continue10:
; CHECK-NEXT:    br i1 [[TMP3]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12]]
; CHECK:       pred.store.if11:
; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 24
; CHECK-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP7]]
; CHECK-NEXT:    store double 0.000000e+00, ptr [[NEXT_GEP3]], align 8
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE12]]
; CHECK:       pred.store.continue12:
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PTR1]], [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    store double 0.000000e+00, ptr [[ADDR]], align 8
; CHECK-NEXT:    [[PTR]] = getelementptr inbounds double, ptr [[ADDR]], i64 1
; CHECK-NEXT:    [[COND:%.*]] = icmp eq ptr [[PTR]], [[PTR2]]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
;
entry:
  %ptr2 = getelementptr inbounds double, ptr %ptr1, i64 15
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %addr = phi ptr [ %ptr, %for.body ], [ %ptr1, %entry ]
  store double 0.0, ptr %addr
  %ptr = getelementptr inbounds double, ptr %addr, i64 1
  %cond = icmp eq ptr %ptr, %ptr2
  br i1 %cond, label %for.cond.cleanup, label %for.body
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-REMARKS: {{.*}}