; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -mtriple=x86_64-apple-macosx -mcpu=penryn -S %s | FileCheck %s

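; The loop below has two exits: the header exits early to %exit.1 when %x.inc
; (which feeds the exit condition) is zero and returns %x.inc there, while the
; latch exits to %exit.2 and returns the xor reduction result. The checks
; expect vectorization with VF 8 behind a SCEV check; the middle block always
; falls through to the scalar loop, which retains the early exit.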
define i64 @test_value_in_exit_compare_chain_used_outside(ptr %src, i64 %x, i64 range(i64 1, 32) %N) {
; CHECK-LABEL: define i64 @test_value_in_exit_compare_chain_used_outside(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[X:%.*]], i64 range(i64 1, 32) [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    [[TMP0:%.*]] = add nsw i64 [[N]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i64 [[TMP0]]
; CHECK-NEXT:    [[UMIN2:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[X]])
; CHECK-NEXT:    [[TMP2:%.*]] = add nuw nsw i64 [[UMIN2]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], 8
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; CHECK:       [[VECTOR_SCEVCHECK]]:
; CHECK-NEXT:    [[TMP3:%.*]] = add nsw i64 [[N]], -1
; CHECK-NEXT:    [[TMP4:%.*]] = freeze i64 [[TMP3]]
; CHECK-NEXT:    [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[X]])
; CHECK-NEXT:    [[TMP5:%.*]] = trunc i64 [[UMIN]] to i1
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ugt i64 [[UMIN]], 1
; CHECK-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    br i1 [[TMP7]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 8
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    [[TMP9:%.*]] = select i1 [[TMP8]], i64 8, i64 [[N_MOD_VF]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP9]]
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <8 x i8> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP29:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP10]], 1
; CHECK-NEXT:    [[TMP26:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP18]]
; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i32 0
; CHECK-NEXT:    [[TMP28:%.*]] = getelementptr i8, ptr [[TMP27]], i32 -7
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP28]], align 1
; CHECK-NEXT:    [[REVERSE:%.*]] = shufflevector <8 x i8> [[WIDE_LOAD]], <8 x i8> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    [[TMP29]] = xor <8 x i8> [[REVERSE]], [[VEC_PHI]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT:    [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    [[TMP31:%.*]] = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> [[TMP29]])
; CHECK-NEXT:    br label %[[SCALAR_PH]]
; CHECK:       [[SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i8 [ [[TMP31]], %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT:    br label %[[LOOP_HEADER:.*]]
; CHECK:       [[LOOP_HEADER]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT:    [[XOR_RED:%.*]] = phi i8 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[XOR_RED_NEXT:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT:    [[IV_AND:%.*]] = and i64 [[IV]], 1
; CHECK-NEXT:    [[X_INC:%.*]] = add i64 [[IV_AND]], [[X]]
; CHECK-NEXT:    [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_AND]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[X_INC]], 0
; CHECK-NEXT:    br i1 [[CMP]], label %[[EXIT_1:.*]], label %[[LOOP_LATCH]]
; CHECK:       [[LOOP_LATCH]]:
; CHECK-NEXT:    [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1
; CHECK-NEXT:    [[XOR_RED_NEXT]] = xor i8 [[L]], [[XOR_RED]]
; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[EC]], label %[[EXIT_2:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       [[EXIT_1]]:
; CHECK-NEXT:    [[X_INC_LCSSA:%.*]] = phi i64 [ [[X_INC]], %[[LOOP_HEADER]] ]
; CHECK-NEXT:    ret i64 [[X_INC_LCSSA]]
; CHECK:       [[EXIT_2]]:
; CHECK-NEXT:    [[XOR_RED_NEXT_LCSSA:%.*]] = phi i8 [ [[XOR_RED_NEXT]], %[[LOOP_LATCH]] ]
; CHECK-NEXT:    [[R:%.*]] = zext i8 [[XOR_RED_NEXT_LCSSA]] to i64
; CHECK-NEXT:    ret i64 [[R]]
;
entry:
  br label %loop.header

loop.header:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %xor.red = phi i8 [ 0, %entry ], [ %xor.red.next, %loop.latch ]
  %iv.and = and i64 %iv, 1
  %x.inc = add i64 %iv.and, %x
  %gep.src = getelementptr i8, ptr %src, i64 %iv.and
  %cmp = icmp eq i64 %x.inc, 0
  br i1 %cmp, label %exit.1, label %loop.latch

loop.latch:
  %l = load i8, ptr %gep.src, align 1
  %xor.red.next = xor i8 %l, %xor.red
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv.next, %N
  br i1 %ec, label %exit.2, label %loop.header

exit.1:
  ret i64 %x.inc

exit.2:
  %r = zext i8 %xor.red.next to i64
  ret i64 %r
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
;.