; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt -S -pass-remarks=loop-vectorize -passes=loop-vectorize -enable-vplan-native-path -force-target-supports-scalable-vectors < %s 2>&1 | FileCheck %s
; RUN: opt -S -pass-remarks=loop-vectorize -passes=loop-vectorize -enable-vplan-native-path < %s 2>&1 | FileCheck %s -check-prefix=NO_SCALABLE_VECS

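; The first RUN line passes -force-target-supports-scalable-vectors, so the target
; is treated as supporting scalable vectors and the CHECK prefix expects the outer
; loop to be vectorized. The second RUN line omits that flag, so the
; NO_SCALABLE_VECS prefix expects the "loop not vectorized" remark instead.
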
; Test that the vplan-native-path vectorizes a loop with scalable vectors when
; the target supports them, and that vectorization is rejected when a scalable
; VF is requested but the target does not support scalable vectors.

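; For reference, the IR below roughly corresponds to the following C loop nest
; (a sketch inferred from the IR, not necessarily the original source): the
; outer loop reads A[i], the inner loop repeatedly multiplies it by B[j], and
; the result is stored back to A[i].
;
;   for (long i = 0; i < 1024; ++i) {
;     float x = A[i];
;     for (long j = 0; j < 512; ++j)
;       x = x * B[j];
;     A[i] = x;
;   }
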
; CHECK: remark: <unknown>:0:0: vectorized outer loop (vectorization width: vscale x 4, interleaved count: 1)
; NO_SCALABLE_VECS: remark: <unknown>:0:0: loop not vectorized: the scalable user-specified vectorization width for outer-loop vectorization cannot be used because the target does not support scalable vectors.

@A = external local_unnamed_addr global [1024 x float], align 4
@B = external local_unnamed_addr global [512 x float], align 4

define void @foo() {
; CHECK-LABEL: define void @foo() {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP19:%.*]] = mul i64 [[TMP18]], 4
; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT:    [[TMP6:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 1)
; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP6]]
; CHECK-NEXT:    [[TMP9:%.*]] = mul i64 1, [[TMP19]]
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[OUTER_LOOP_LATCH4:%.*]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[OUTER_LOOP_LATCH4]] ]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [1024 x float], ptr @A, i64 0, <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT:    [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison)
; CHECK-NEXT:    br label [[INNER_LOOP1:%.*]]
; CHECK:       inner_loop1:
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <vscale x 4 x i64> [ zeroinitializer, [[VECTOR_BODY]] ], [ [[TMP13:%.*]], [[INNER_LOOP1]] ]
; CHECK-NEXT:    [[VEC_PHI2:%.*]] = phi <vscale x 4 x float> [ [[WIDE_MASKED_GATHER]], [[VECTOR_BODY]] ], [ [[TMP12:%.*]], [[INNER_LOOP1]] ]
; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [512 x float], ptr @B, i64 0, <vscale x 4 x i64> [[VEC_PHI]]
; CHECK-NEXT:    [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> [[TMP11]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison)
; CHECK-NEXT:    [[TMP12]] = fmul <vscale x 4 x float> [[VEC_PHI2]], [[WIDE_MASKED_GATHER3]]
; CHECK-NEXT:    [[TMP13]] = add nuw nsw <vscale x 4 x i64> [[VEC_PHI]], splat (i64 1)
; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq <vscale x 4 x i64> [[TMP13]], splat (i64 512)
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <vscale x 4 x i1> [[TMP14]], i32 0
; CHECK-NEXT:    br i1 [[TMP15]], label [[OUTER_LOOP_LATCH4]], label [[INNER_LOOP1]]
; CHECK:       vector.latch:
; CHECK-NEXT:    [[VEC_PHI5:%.*]] = phi <vscale x 4 x float> [ [[TMP12]], [[INNER_LOOP1]] ]
; CHECK-NEXT:    call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VEC_PHI5]], <vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT:    [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[OUTER_LOOP:%.*]]
; CHECK:       outer_loop:
; CHECK-NEXT:    [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[OUTER_LOOP_LATCH:%.*]] ]
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x float], ptr @A, i64 0, i64 [[I]]
; CHECK-NEXT:    [[X_START:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT:    br label [[INNER_LOOP:%.*]]
; CHECK:       inner_loop:
; CHECK-NEXT:    [[J:%.*]] = phi i64 [ 0, [[OUTER_LOOP]] ], [ [[J_NEXT:%.*]], [[INNER_LOOP]] ]
; CHECK-NEXT:    [[X:%.*]] = phi float [ [[X_START]], [[OUTER_LOOP]] ], [ [[X_NEXT:%.*]], [[INNER_LOOP]] ]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [512 x float], ptr @B, i64 0, i64 [[J]]
; CHECK-NEXT:    [[B:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[X_NEXT]] = fmul float [[X]], [[B]]
; CHECK-NEXT:    [[J_NEXT]] = add nuw nsw i64 [[J]], 1
; CHECK-NEXT:    [[INNER_EXITCOND:%.*]] = icmp eq i64 [[J_NEXT]], 512
; CHECK-NEXT:    br i1 [[INNER_EXITCOND]], label [[OUTER_LOOP_LATCH]], label [[INNER_LOOP]]
; CHECK:       outer_loop_latch:
; CHECK-NEXT:    [[X_NEXT_LCSSA:%.*]] = phi float [ [[X_NEXT]], [[INNER_LOOP]] ]
; CHECK-NEXT:    store float [[X_NEXT_LCSSA]], ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT:    [[OUTER_EXITCOND:%.*]] = icmp eq i64 [[I_NEXT]], 1024
; CHECK-NEXT:    br i1 [[OUTER_EXITCOND]], label [[EXIT]], label [[OUTER_LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
; NO_SCALABLE_VECS-LABEL: define void @foo() {
; NO_SCALABLE_VECS-NEXT:  entry:
; NO_SCALABLE_VECS-NEXT:    br label [[OUTER_LOOP:%.*]]
; NO_SCALABLE_VECS:       outer_loop:
; NO_SCALABLE_VECS-NEXT:    [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[OUTER_LOOP_LATCH:%.*]] ]
; NO_SCALABLE_VECS-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x float], ptr @A, i64 0, i64 [[I]]
; NO_SCALABLE_VECS-NEXT:    [[X_START:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
; NO_SCALABLE_VECS-NEXT:    br label [[INNER_LOOP:%.*]]
; NO_SCALABLE_VECS:       inner_loop:
; NO_SCALABLE_VECS-NEXT:    [[J:%.*]] = phi i64 [ 0, [[OUTER_LOOP]] ], [ [[J_NEXT:%.*]], [[INNER_LOOP]] ]
; NO_SCALABLE_VECS-NEXT:    [[X:%.*]] = phi float [ [[X_START]], [[OUTER_LOOP]] ], [ [[X_NEXT:%.*]], [[INNER_LOOP]] ]
; NO_SCALABLE_VECS-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [512 x float], ptr @B, i64 0, i64 [[J]]
; NO_SCALABLE_VECS-NEXT:    [[B:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; NO_SCALABLE_VECS-NEXT:    [[X_NEXT]] = fmul float [[X]], [[B]]
; NO_SCALABLE_VECS-NEXT:    [[J_NEXT]] = add nuw nsw i64 [[J]], 1
; NO_SCALABLE_VECS-NEXT:    [[INNER_EXITCOND:%.*]] = icmp eq i64 [[J_NEXT]], 512
; NO_SCALABLE_VECS-NEXT:    br i1 [[INNER_EXITCOND]], label [[OUTER_LOOP_LATCH]], label [[INNER_LOOP]]
; NO_SCALABLE_VECS:       outer_loop_latch:
; NO_SCALABLE_VECS-NEXT:    [[X_NEXT_LCSSA:%.*]] = phi float [ [[X_NEXT]], [[INNER_LOOP]] ]
; NO_SCALABLE_VECS-NEXT:    store float [[X_NEXT_LCSSA]], ptr [[ARRAYIDX1]], align 4
; NO_SCALABLE_VECS-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; NO_SCALABLE_VECS-NEXT:    [[OUTER_EXITCOND:%.*]] = icmp eq i64 [[I_NEXT]], 1024
; NO_SCALABLE_VECS-NEXT:    br i1 [[OUTER_EXITCOND]], label [[EXIT:%.*]], label [[OUTER_LOOP]], !llvm.loop [[LOOP0:![0-9]+]]
; NO_SCALABLE_VECS:       exit:
; NO_SCALABLE_VECS-NEXT:    ret void
;
entry:
  br label %outer_loop

outer_loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %outer_loop_latch ]
  %arrayidx1 = getelementptr inbounds [1024 x float], ptr @A, i64 0, i64 %i
  %x.start = load float, ptr %arrayidx1, align 4
  br label %inner_loop

inner_loop:
  %j = phi i64 [ 0, %outer_loop ], [ %j.next, %inner_loop ]
  %x = phi float [ %x.start, %outer_loop ], [ %x.next, %inner_loop ]
  %arrayidx2 = getelementptr inbounds [512 x float], ptr @B, i64 0, i64 %j
  %b = load float, ptr %arrayidx2, align 4
  %x.next = fmul float %x, %b
  %j.next = add nuw nsw i64 %j, 1
  %inner_exitcond = icmp eq i64 %j.next, 512
  br i1 %inner_exitcond, label %outer_loop_latch, label %inner_loop

outer_loop_latch:
  store float %x.next, ptr %arrayidx1, align 4
  %i.next = add nuw nsw i64 %i, 1
  %outer_exitcond = icmp eq i64 %i.next, 1024
  br i1 %outer_exitcond, label %exit, label %outer_loop, !llvm.loop !1

exit:
  ret void
}

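; Loop metadata attached to the outer loop's latch branch: !2 enables
; vectorization, !3 requests a scalable vectorization factor, and !4 sets the
; user-specified width to 4, i.e. the requested VF is vscale x 4.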
!1 = distinct !{!1, !2, !3, !4}
!2 = !{!"llvm.loop.vectorize.enable", i1 true}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.vectorize.width", i32 4}