; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -p loop-vectorize -enable-vplan-native-path -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s
; RUN: opt -p loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; -enable-vplan-native-path should not impact codegen for inner loops.
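; The loop below adds 2 to A[2 * iv] for iv = 0..99. Both RUN lines feed the same
; CHECK prefix, so the VPlan native path must produce the identical interleaved
; wide load (<8 x i32>) with scalarized stores checked below.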

define void @test(ptr %A) {
; CHECK-LABEL: define void @test(
; CHECK-SAME: ptr [[A:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP4:%.*]] = shl nsw i64 [[TMP0]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = shl nsw i64 [[TMP1]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = shl nsw i64 [[TMP2]], 1
; CHECK-NEXT:    [[TMP7:%.*]] = shl nsw i64 [[TMP3]], 1
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]]
; CHECK-NEXT:    [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT:    [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT:    [[TMP13:%.*]] = add <4 x i32> [[STRIDED_VEC]], splat (i32 2)
; CHECK-NEXT:    [[TMP14:%.*]] = extractelement <4 x i32> [[TMP13]], i32 0
; CHECK-NEXT:    store i32 [[TMP14]], ptr [[TMP8]], align 4
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x i32> [[TMP13]], i32 1
; CHECK-NEXT:    store i32 [[TMP15]], ptr [[TMP9]], align 4
; CHECK-NEXT:    [[TMP16:%.*]] = extractelement <4 x i32> [[TMP13]], i32 2
; CHECK-NEXT:    store i32 [[TMP16]], ptr [[TMP10]], align 4
; CHECK-NEXT:    [[TMP17:%.*]] = extractelement <4 x i32> [[TMP13]], i32 3
; CHECK-NEXT:    store i32 [[TMP17]], ptr [[TMP11]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
; CHECK-NEXT:    br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 96, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i64 [[IV]], 1
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL]]
; CHECK-NEXT:    [[L:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[L]], 2
; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100
; CHECK-NEXT:    br i1 [[EC]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %mul = shl nsw i64 %iv, 1
  %gep = getelementptr inbounds i32, ptr %A, i64 %mul
  %l = load i32, ptr %gep, align 4
  %add = add i32 %l, 2
  store i32 %add, ptr %gep
  %iv.next = add nsw i64 %iv, 1
  %ec = icmp eq i64 %iv.next, 100
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
;.