; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize -S -mtriple=aarch64 -mattr=+sve -debug-only=loop-vectorize \
; RUN:   -prefer-predicate-over-epilogue=scalar-epilogue < %s 2>&1 | FileCheck %s
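; Note: the -debug-only output only exists in asserts builds (hence
; REQUIRES: asserts), and forcing a scalar epilogue keeps the main vector
; loop unpredicated, so the checks below apply to it directly.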

target triple = "aarch64-unknown-linux-gnu"

; CHECK-LABEL:  LV: Checking a loop in 'pointer_induction_used_as_vector'
; CHECK-NOT:    LV: Found {{.*}} scalar instruction:   %ptr.iv.2.next = getelementptr inbounds i8, ptr %ptr.iv.2, i64 1
;
; CHECK:        VPlan 'Initial VPlan for VF={vscale x 2},UF>=1' {
; CHECK-NEXT:   Live-in vp<[[VFxUF:%.+]]> = VF * UF
; CHECK-NEXT:   Live-in vp<[[VEC_TC:%.+]]> = vector-trip-count
; CHECK-NEXT:   Live-in ir<%N> = original trip-count
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<entry>:
; CHECK-NEXT: Successor(s): vector.ph
; CHECK-EMPTY:
; CHECK-NEXT: vector.ph:
; CHECK-NEXT:  vp<[[END1:%.+]]> = DERIVED-IV ir<%start.1> + vp<[[VEC_TC]]> * ir<8>
; CHECK-NEXT:  vp<[[END2:%.+]]> = DERIVED-IV ir<%start.2> + vp<[[VEC_TC]]> * ir<1>
; CHECK-NEXT: Successor(s): vector loop
; CHECK-EMPTY:
; CHECK-NEXT:   <x1> vector loop: {
; CHECK-NEXT:   vector.body:
; CHECK-NEXT:     EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT:     EMIT ir<%ptr.iv.2> = WIDEN-POINTER-INDUCTION ir<%start.2>, ir<1>
; CHECK-NEXT:     vp<[[PTR_IDX:%.+]]> = DERIVED-IV ir<0> + vp<[[CAN_IV]]> * ir<8>
; CHECK-NEXT:     vp<[[PTR_IDX_STEPS:%.+]]> = SCALAR-STEPS vp<[[PTR_IDX]]>, ir<8>
; CHECK-NEXT:     EMIT vp<[[PTR_IV_1:%.+]]> = ptradd ir<%start.1>, vp<[[PTR_IDX_STEPS]]>
; CHECK-NEXT:     WIDEN-GEP Var[Inv] ir<%ptr.iv.2.next> = getelementptr inbounds ir<%ptr.iv.2>, ir<1>
; CHECK-NEXT:     vp<[[VEC_PTR:%.+]]> = vector-pointer vp<[[PTR_IV_1]]>
; CHECK-NEXT:     WIDEN store vp<[[VEC_PTR]]>, ir<%ptr.iv.2.next>
; CHECK-NEXT:     vp<[[VEC_PTR2:%.+]]> = vector-pointer ir<%ptr.iv.2>
; CHECK-NEXT:     WIDEN ir<%lv> = load vp<[[VEC_PTR2]]>
; CHECK-NEXT:     WIDEN ir<%add> = add ir<%lv>, ir<1>
; CHECK-NEXT:     vp<[[VEC_PTR3:%.+]]> = vector-pointer ir<%ptr.iv.2>
; CHECK-NEXT:     WIDEN store vp<[[VEC_PTR3]]>, ir<%add>
; CHECK-NEXT:     EMIT vp<[[CAN_IV_NEXT:%.+]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
; CHECK-NEXT:     EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VEC_TC]]>
; CHECK-NEXT:   No successors
; CHECK-NEXT:   }
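; The checks stop at the vector loop region; the rest of the plan (middle
; block and its successors) is not matched here.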

; In the test below the pointer phi %ptr.iv.2 is used in two ways:
;  1. As a uniform address for the load, and
;  2. Non-uniformly by the getelementptr whose result is stored. This use
;     requires the vector value.
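; A rough C equivalent of the loop below (illustrative only; the identifiers
; are made up, since %start.1 and %start.2 are not valid C names):
;
;   char **p1 = start_1;
;   char *p2 = start_2;
;   for (uint64_t i = 0; i != N; ++i) {
;     *p1++ = p2 + 1; // non-uniform use: the widened GEP result is stored
;     *p2 += 1;       // uniform use: p2 addresses both the load and the store
;     ++p2;
;   }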
define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias %start.2, i64 %N) {
; CHECK-LABEL: @pointer_induction_used_as_vector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 2
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 2
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 2
; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[N_VEC]], 8
; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr i8, ptr [[START_1:%.*]], i64 [[TMP4]]
; CHECK-NEXT:    [[IND_END2:%.*]] = getelementptr i8, ptr [[START_2:%.*]], i64 [[N_VEC]]

; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP7]], 2
; CHECK-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 1
; CHECK-NEXT:    [[TMP10:%.*]] = mul i64 1, [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP8]], 0
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP12:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT:    [[TMP13:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = mul <vscale x 2 x i64> [[TMP13]], splat (i64 1)
; CHECK-NEXT:    [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP14]]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP15]]
; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i8, <vscale x 2 x ptr> [[VECTOR_GEP]], i64 1
; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
; CHECK-NEXT:    store <vscale x 2 x ptr> [[TMP16]], ptr [[TMP17]], align 8
; CHECK-NEXT:    [[TMP18:%.*]] = extractelement <vscale x 2 x ptr> [[VECTOR_GEP]], i32 0
; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr i8, ptr [[TMP18]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP19]], align 1
; CHECK-NEXT:    [[TMP20:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1)
; CHECK-NEXT:    store <vscale x 2 x i8> [[TMP20]], ptr [[TMP19]], align 1
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT:    [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP10]]
; CHECK-NEXT:    [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START_1]], [[ENTRY]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL3:%.*]] = phi ptr [ [[IND_END2]], [[MIDDLE_BLOCK]] ], [ [[START_2]], [[ENTRY]] ]
; CHECK-NEXT:    br label [[LOOP_BODY:%.*]]
; CHECK:       loop.body:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT:    [[PTR_IV_1:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[PTR_IV_1_NEXT:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT:    [[PTR_IV_2:%.*]] = phi ptr [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[PTR_IV_2_NEXT:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT:    [[PTR_IV_1_NEXT]] = getelementptr inbounds ptr, ptr [[PTR_IV_1]], i64 1
; CHECK-NEXT:    [[PTR_IV_2_NEXT]] = getelementptr inbounds i8, ptr [[PTR_IV_2]], i64 1
; CHECK-NEXT:    store ptr [[PTR_IV_2_NEXT]], ptr [[PTR_IV_1]], align 8
; CHECK-NEXT:    [[LV:%.*]] = load i8, ptr [[PTR_IV_2]], align 1
; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[LV]], 1
; CHECK-NEXT:    store i8 [[ADD]], ptr [[PTR_IV_2]], align 1
; CHECK-NEXT:    [[IV_NEXT]] = add nuw i64 [[IV]], 1
; CHECK-NEXT:    [[C:%.*]] = icmp ne i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[C]], label [[LOOP_BODY]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;


entry:
  br label %loop.body

loop.body:                                    ; preds = %loop.body, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.body ]
  %ptr.iv.1 = phi ptr [ %start.1, %entry ], [ %ptr.iv.1.next, %loop.body ]
  %ptr.iv.2 = phi ptr [ %start.2, %entry ], [ %ptr.iv.2.next, %loop.body ]
  %ptr.iv.1.next = getelementptr inbounds ptr, ptr %ptr.iv.1, i64 1
  %ptr.iv.2.next = getelementptr inbounds i8, ptr %ptr.iv.2, i64 1
  store ptr %ptr.iv.2.next, ptr %ptr.iv.1, align 8
  %lv = load i8, ptr %ptr.iv.2, align 1
  %add = add i8 %lv, 1
  store i8 %add, ptr %ptr.iv.2, align 1
  %iv.next = add nuw i64 %iv, 1
  %c = icmp ne i64 %iv.next, %N
  br i1 %c, label %loop.body, label %exit, !llvm.loop !0

exit:                            ; preds = %loop.body
  ret void
}

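; In @pointer_induction below, the pointer phi %ptr.phi is only used as the
; uniform address of the load and store (and to compute the next pointer), so
; the vectorized loop extracts lane 0 of the widened pointer for its contiguous
; accesses. Note the trip count is %N + 1: the exit compare uses %index rather
; than %index_nxt, so the body runs for %index = 0..%N.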
define void @pointer_induction(ptr noalias %start, i64 %N) {
; CHECK-LABEL: @pointer_induction(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 2
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 2
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 2
; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[N_VEC]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[INDEX2:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP7]], 2
; CHECK-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 1
; CHECK-NEXT:    [[TMP10:%.*]] = mul i64 1, [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP8]], 0
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP12:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT:    [[TMP13:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = mul <vscale x 2 x i64> [[TMP13]], splat (i64 1)
; CHECK-NEXT:    [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP14]]
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <vscale x 2 x ptr> [[VECTOR_GEP]], i32 0
; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr i8, ptr [[TMP15]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP16]], align 1
; CHECK-NEXT:    [[TMP17:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1)
; CHECK-NEXT:    store <vscale x 2 x i8> [[TMP17]], ptr [[TMP16]], align 1
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX2]], [[TMP6]]
; CHECK-NEXT:    [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP10]]
; CHECK-NEXT:    [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[PTR_PHI:%.*]] = phi ptr [ [[PTR_PHI_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ [[INDEX_NXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[INDEX_NXT]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[TMP19:%.*]] = load i8, ptr [[PTR_PHI]], align 1
; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[TMP19]], 1
; CHECK-NEXT:    store i8 [[ADD]], ptr [[PTR_PHI]], align 1
; CHECK-NEXT:    [[PTR_PHI_NEXT]] = getelementptr inbounds i8, ptr [[PTR_PHI]], i64 1
; CHECK-NEXT:    [[CMP_I_NOT:%.*]] = icmp eq ptr [[PTR_PHI_NEXT]], [[START]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[INDEX]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[END]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %ptr.phi = phi ptr [ %ptr.phi.next, %for.body ], [ %start, %entry ]
  %index = phi i64 [ %index_nxt, %for.body ], [ 0, %entry ]
  %index_nxt = add i64 %index, 1
  %0 = load i8, ptr %ptr.phi, align 1
  %add = add i8 %0, 1
  store i8 %add, ptr %ptr.phi
  %ptr.phi.next = getelementptr inbounds i8, ptr %ptr.phi, i64 1
  %cmp.i.not = icmp eq ptr %ptr.phi.next, %start
  %cmp = icmp ult i64 %index, %N
  br i1 %cmp, label %for.body, label %end, !llvm.loop !0

end:
  ret void
}

attributes #0 = {"target-features"="+sve"}

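; !0 pins the vectorization decisions for both loops: interleave count 1,
; vectorize width 2, and scalable vectorization enabled, which with +sve
; yields VF=vscale x 2.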
!0 = distinct !{!0, !1, !2, !3}
!1 = !{!"llvm.loop.interleave.count", i32 1}
!2 = !{!"llvm.loop.vectorize.width", i32 2}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{ !5 }
!5 = distinct !{ !5, !6 }
!6 = distinct !{ !7 }
!7 = distinct !{ !7, !6 }