; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-vectorize -force-vector-width=4 -S %s | FileCheck %s
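; The indices into %A are computed as zext(trunc(iv) * %x), so the vectorizer guards the
; vector loop with SCEV runtime checks: an identity check that %x == 1 plus
; umul.with.overflow checks for the truncated induction (see the vector.scevcheck block below).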

define void @test(float* %A, i32 %x) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[IDENT_CHECK:%.*]] = icmp ne i32 [[X:%.*]], 1
; CHECK-NEXT:    [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 undef)
; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt i32 [[TMP1]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i32 [[TMP0]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = or i1 [[TMP3]], [[MUL_OVERFLOW]]
; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[IDENT_CHECK]], [[TMP4]]
; CHECK-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 undef)
; CHECK-NEXT:    [[MUL_RESULT2:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW3:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = add i32 1, [[MUL_RESULT2]]
; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 1, [[MUL_RESULT2]]
; CHECK-NEXT:    [[TMP8:%.*]] = icmp ugt i32 [[TMP7]], 1
; CHECK-NEXT:    [[TMP9:%.*]] = icmp ult i32 [[TMP6]], 1
; CHECK-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW3]]
; CHECK-NEXT:    [[TMP11:%.*]] = or i1 [[TMP5]], [[TMP10]]
; CHECK-NEXT:    br i1 [[TMP11]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP13:%.*]] = add nuw nsw i64 [[TMP12]], 1
; CHECK-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
; CHECK-NEXT:    [[TMP15:%.*]] = mul i32 [[TMP14]], [[X]]
; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds float, float* [[TMP17]], i32 0
; CHECK-NEXT:    [[TMP19:%.*]] = bitcast float* [[TMP18]] to <4 x float>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x float>, <4 x float>* [[TMP19]], align 4
; CHECK-NEXT:    [[TMP20:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP20]], 0
; CHECK-NEXT:    [[TMP22:%.*]] = mul i32 [[TMP21]], [[X]]
; CHECK-NEXT:    [[TMP23:%.*]] = zext i32 [[TMP22]] to i64
; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP23]]
; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds float, float* [[TMP24]], i32 0
; CHECK-NEXT:    [[TMP26:%.*]] = bitcast float* [[TMP25]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[WIDE_LOAD]], <4 x float>* [[TMP26]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], undef
; CHECK-NEXT:    br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 undef, undef
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ undef, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[T_IV_NEXT:%.*]] = trunc i64 [[IV_NEXT]] to i32
; CHECK-NEXT:    [[MUL_IV_NEXT:%.*]] = mul i32 [[T_IV_NEXT]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = zext i32 [[MUL_IV_NEXT]] to i64
; CHECK-NEXT:    [[ARRAYIDX1215:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[IDX_1]]
; CHECK-NEXT:    [[LV:%.*]] = load float, float* [[ARRAYIDX1215]], align 4
; CHECK-NEXT:    [[T_IV:%.*]] = trunc i64 [[IV]] to i32
; CHECK-NEXT:    [[MUL_IV:%.*]] = mul i32 [[T_IV]], [[X]]
; CHECK-NEXT:    [[IDX_2:%.*]] = zext i32 [[MUL_IV]] to i64
; CHECK-NEXT:    [[ARRAYIDX1209:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[IDX_2]]
; CHECK-NEXT:    store float [[LV]], float* [[ARRAYIDX1209]], align 4
; CHECK-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], undef
; CHECK-NEXT:    br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:                                     ; preds = %loop, %entry
  %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
  %iv.next = add nuw nsw i64 %iv, 1
  %t.iv.next = trunc i64 %iv.next to i32
  %mul.iv.next = mul i32 %t.iv.next, %x
  %idx.1 = zext i32 %mul.iv.next to i64
  %arrayidx1215 = getelementptr inbounds float, float* %A, i64 %idx.1
  %lv = load float, float* %arrayidx1215, align 4

  %t.iv = trunc i64 %iv to i32
  %mul.iv = mul i32 %t.iv, %x
  %idx.2 = zext i32 %mul.iv to i64
  %arrayidx1209 = getelementptr inbounds float, float* %A, i64 %idx.2
  store float %lv, float* %arrayidx1209, align 4
  %ec = icmp eq i64 %iv.next, undef
  br i1 %ec, label %exit, label %loop

exit:                             ; preds = %loop
  ret void
}