; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -S %s | FileCheck %s

target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
target triple = "riscv64-unknown-linux-gnu"

; Test case for https://github.com/llvm/llvm-project/issues/106417.
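; The loop keeps a second, narrower i32 IV (%iv.conv) that is just a truncate
; of the primary i64 IV (%5 = trunc i64 %iv.next to i32). That truncate should
; be recognized as free when costing the widened induction, so the loop is
; still expected to vectorize with a scalable VF (vscale x 8), guarded by the
; runtime memory checks reflected in the CHECK lines below.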
define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK-LABEL: define void @skip_free_iv_truncate(
; CHECK-SAME: i16 [[X:%.*]], ptr [[A:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    [[X_I32:%.*]] = sext i16 [[X]] to i32
; CHECK-NEXT:    [[X_I64:%.*]] = sext i16 [[X]] to i64
; CHECK-NEXT:    [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 -8
; CHECK-NEXT:    [[SMAX20:%.*]] = call i64 @llvm.smax.i64(i64 [[X_I64]], i64 99)
; CHECK-NEXT:    [[TMP0:%.*]] = sub i64 [[SMAX20]], [[X_I64]]
; CHECK-NEXT:    [[UMIN21:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP0]], i64 1)
; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[SMAX20]], [[UMIN21]]
; CHECK-NEXT:    [[TMP2:%.*]] = sub i64 [[TMP1]], [[X_I64]]
; CHECK-NEXT:    [[TMP3:%.*]] = udiv i64 [[TMP2]], 3
; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[UMIN21]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP7:%.*]] = mul i64 [[TMP6]], 8
; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.umax.i64(i64 128, i64 [[TMP7]])
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP5]], [[TMP8]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK:       [[VECTOR_MEMCHECK]]:
; CHECK-NEXT:    [[TMP31:%.*]] = shl nsw i64 [[X_I64]], 1
; CHECK-NEXT:    [[SCEVGEP9:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP31]]
; CHECK-NEXT:    [[SMAX10:%.*]] = call i64 @llvm.smax.i64(i64 [[X_I64]], i64 99)
; CHECK-NEXT:    [[TMP32:%.*]] = sub i64 [[SMAX10]], [[X_I64]]
; CHECK-NEXT:    [[UMIN11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP32]], i64 1)
; CHECK-NEXT:    [[TMP33:%.*]] = sub i64 [[SMAX10]], [[UMIN11]]
; CHECK-NEXT:    [[TMP34:%.*]] = sub i64 [[TMP33]], [[X_I64]]
; CHECK-NEXT:    [[TMP35:%.*]] = udiv i64 [[TMP34]], 3
; CHECK-NEXT:    [[TMP36:%.*]] = add i64 [[UMIN11]], [[TMP35]]
; CHECK-NEXT:    [[TMP37:%.*]] = mul i64 [[TMP36]], 6
; CHECK-NEXT:    [[TMP38:%.*]] = add i64 [[TMP37]], [[TMP31]]
; CHECK-NEXT:    [[TMP39:%.*]] = add i64 [[TMP38]], 2
; CHECK-NEXT:    [[SCEVGEP12:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP39]]
; CHECK-NEXT:    [[TMP40:%.*]] = shl nsw i64 [[X_I64]], 3
; CHECK-NEXT:    [[SCEVGEP13:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP40]]
; CHECK-NEXT:    [[TMP41:%.*]] = mul i64 [[TMP36]], 24
; CHECK-NEXT:    [[TMP42:%.*]] = add i64 [[TMP41]], [[TMP40]]
; CHECK-NEXT:    [[TMP43:%.*]] = add i64 [[TMP42]], 8
; CHECK-NEXT:    [[SCEVGEP14:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP43]]
; CHECK-NEXT:    [[TMP44:%.*]] = add nsw i64 [[TMP40]], -8
; CHECK-NEXT:    [[SCEVGEP15:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP44]]
; CHECK-NEXT:    [[SCEVGEP16:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP42]]
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP9]], [[SCEVGEP14]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP13]], [[SCEVGEP12]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    [[BOUND017:%.*]] = icmp ult ptr [[SCEVGEP9]], [[SCEVGEP16]]
; CHECK-NEXT:    [[BOUND118:%.*]] = icmp ult ptr [[SCEVGEP15]], [[SCEVGEP12]]
; CHECK-NEXT:    [[FOUND_CONFLICT19:%.*]] = and i1 [[BOUND017]], [[BOUND118]]
; CHECK-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT19]]
; CHECK-NEXT:    br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP46:%.*]] = mul i64 [[TMP45]], 8
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP5]], [[TMP46]]
; CHECK-NEXT:    [[TMP47:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    [[TMP48:%.*]] = select i1 [[TMP47]], i64 [[TMP46]], i64 [[N_MOD_VF]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP5]], [[TMP48]]
; CHECK-NEXT:    [[TMP51:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP52:%.*]] = mul i64 [[TMP51]], 8
; CHECK-NEXT:    [[TMP49:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT:    [[IND_END:%.*]] = add i64 [[X_I64]], [[TMP49]]
; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-NEXT:    [[TMP50:%.*]] = mul i32 [[DOTCAST]], 3
; CHECK-NEXT:    [[IND_END22:%.*]] = add i32 [[X_I32]], [[TMP50]]
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X_I64]], i64 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP53:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT:    [[TMP55:%.*]] = mul <vscale x 8 x i64> [[TMP53]], splat (i64 3)
; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 8 x i64> [[DOTSPLAT]], [[TMP55]]
; CHECK-NEXT:    [[TMP58:%.*]] = mul i64 3, [[TMP52]]
; CHECK-NEXT:    [[DOTSPLATINSERT24:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP58]], i64 0
; CHECK-NEXT:    [[DOTSPLAT25:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT24]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP59:%.*]] = getelementptr i16, ptr [[A]], <vscale x 8 x i64> [[VEC_IND]]
; CHECK-NEXT:    call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP59]], i32 2, <vscale x 8 x i1> splat (i1 true)), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP52]]
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT25]]
; CHECK-NEXT:    [[TMP60:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP60]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    br label %[[SCALAR_PH]]
; CHECK:       [[SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ], [ [[X_I64]], %[[ENTRY]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL13:%.*]] = phi i32 [ [[IND_END22]], %[[MIDDLE_BLOCK]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ], [ [[X_I32]], %[[ENTRY]] ]
; CHECK-NEXT:    br label %[[LOOP:.*]]
; CHECK:       [[LOOP]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[IV_CONV:%.*]] = phi i32 [ [[BC_RESUME_VAL13]], %[[SCALAR_PH]] ], [ [[TMP64:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[GEP_I64:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT:    [[TMP61:%.*]] = load i64, ptr [[GEP_I64]], align 8
; CHECK-NEXT:    [[TMP62:%.*]] = sext i32 [[IV_CONV]] to i64
; CHECK-NEXT:    [[GEP_CONV:%.*]] = getelementptr i64, ptr [[INVARIANT_GEP]], i64 [[TMP62]]
; CHECK-NEXT:    [[TMP63:%.*]] = load i64, ptr [[GEP_CONV]], align 8
; CHECK-NEXT:    [[GEP_I16:%.*]] = getelementptr i16, ptr [[A]], i64 [[IV]]
; CHECK-NEXT:    store i16 0, ptr [[GEP_I16]], align 2
; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 3
; CHECK-NEXT:    [[TMP64]] = trunc i64 [[IV_NEXT]] to i32
; CHECK-NEXT:    [[C:%.*]] = icmp slt i64 [[IV]], 99
; CHECK-NEXT:    br i1 [[C]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  %x.i32 = sext i16 %x to i32
  %x.i64 = sext i16 %x to i64
  %invariant.gep = getelementptr i8, ptr %A, i64 -8
  br label %loop

loop:
  %iv = phi i64 [ %x.i64, %entry ], [ %iv.next, %loop ]
  %iv.conv = phi i32 [ %x.i32, %entry ], [ %5, %loop ]
  %gep.i64 = getelementptr i64, ptr %A, i64 %iv
  %2 = load i64, ptr %gep.i64, align 8
  %3 = sext i32 %iv.conv to i64
  %gep.conv = getelementptr i64, ptr %invariant.gep, i64 %3
  %4 = load i64, ptr %gep.conv, align 8
  %gep.i16 = getelementptr i16, ptr %A, i64 %iv
  store i16 0, ptr %gep.i16, align 2
  %iv.next = add i64 %iv, 3
  %5 = trunc i64 %iv.next to i32
  %c = icmp slt i64 %iv, 99
  br i1 %c, label %loop, label %exit

exit:
  ret void
}

attributes #0 = { "target-features"="+64bit,+v,+zvl256b" }
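; +v enables the RISC-V vector extension and +zvl256b guarantees a minimum
; vector register length (VLEN) of 256 bits for the scalable (vscale x 8)
; vectorization checked above.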
;.
; CHECK: [[META0]] = !{[[META1:![0-9]+]]}
; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]}
; CHECK: [[META2]] = distinct !{[[META2]], !"LVerDomain"}
; CHECK: [[META3]] = !{[[META4:![0-9]+]], [[META5:![0-9]+]]}
; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]}
; CHECK: [[META5]] = distinct !{[[META5]], [[META2]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]]}
; CHECK: [[META7]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]]}
;.