; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize -mtriple=x86_64-unknown-linux-gnu -S < %s | FileCheck %s

; The test checks that there is no assert caused by the issue described in PR35432.

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

@a = common local_unnamed_addr global [192 x [192 x i32]] zeroinitializer, align 16

define i32 @main(ptr %ptr) {
; CHECK-LABEL: @main(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[S:%.*]] = alloca i16, align 2
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I]])
; CHECK-NEXT:    store i32 0, ptr [[I]], align 4
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 2, ptr nonnull [[S]])
; CHECK-NEXT:    [[CALL:%.*]] = call i32 (ptr, ...) @goo(ptr nonnull [[I]])
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[I]], align 4
; CHECK-NEXT:    [[STOREMERGE6:%.*]] = trunc i32 [[TMP0]] to i16
; CHECK-NEXT:    store i16 [[STOREMERGE6]], ptr [[S]], align 2
; CHECK-NEXT:    [[CONV17:%.*]] = and i32 [[TMP0]], 65472
; CHECK-NEXT:    [[CMP8:%.*]] = icmp eq i32 [[CONV17]], 0
; CHECK-NEXT:    br i1 [[CMP8]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END12:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[STOREMERGE_IN9:%.*]] = phi i32 [ [[TMP0]], [[FOR_BODY_LR_PH]] ], [ [[ADD:%.*]], [[FOR_INC9:%.*]] ]
; CHECK-NEXT:    [[CONV52:%.*]] = and i32 [[STOREMERGE_IN9]], 255
; CHECK-NEXT:    [[CMP63:%.*]] = icmp ult i32 [[TMP0]], [[CONV52]]
; CHECK-NEXT:    br i1 [[CMP63]], label [[FOR_BODY8_LR_PH:%.*]], label [[FOR_INC9]]
; CHECK:       for.body8.lr.ph:
; CHECK-NEXT:    [[CONV3:%.*]] = trunc i32 [[STOREMERGE_IN9]] to i8
; CHECK-NEXT:    [[DOTPROMOTED:%.*]] = load i32, ptr @a, align 16
; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[CONV3]], -1
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[TMP2]], 1
; CHECK-NEXT:    [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP0]], i32 [[TMP2]])
; CHECK-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP3]], [[UMIN1]]
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP4]], 40
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[TMP5:%.*]] = add i8 [[CONV3]], -1
; CHECK-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP0]], i32 [[TMP6]])
; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 [[TMP6]], [[UMIN]]
; CHECK-NEXT:    [[TMP8:%.*]] = trunc i32 [[TMP7]] to i8
; CHECK-NEXT:    [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 1, i8 [[TMP8]])
; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i8, i1 } [[MUL]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i8, i1 } [[MUL]], 1
; CHECK-NEXT:    [[TMP9:%.*]] = sub i8 [[TMP5]], [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP10:%.*]] = icmp ugt i8 [[TMP9]], [[TMP5]]
; CHECK-NEXT:    [[TMP11:%.*]] = or i1 [[TMP10]], [[MUL_OVERFLOW]]
; CHECK-NEXT:    [[TMP12:%.*]] = icmp ugt i32 [[TMP7]], 255
; CHECK-NEXT:    [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[DOTPROMOTED]], 1
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP14]], [[TMP7]]
; CHECK-NEXT:    [[TMP16:%.*]] = icmp slt i32 [[TMP15]], [[TMP14]]
; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP13]], [[TMP16]]
; CHECK-NEXT:    br i1 [[TMP17]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP4]], 8
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP4]], [[N_MOD_VF]]
; CHECK-NEXT:    [[IND_END:%.*]] = add i32 [[DOTPROMOTED]], [[N_VEC]]
; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8
; CHECK-NEXT:    [[IND_END2:%.*]] = sub i8 [[CONV3]], [[DOTCAST]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i32 [[DOTPROMOTED]], [[INDEX]]
; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP18]], 1
; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i32 [[TMP20]]
; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i32 0
; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i32 4
; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP24]], align 4
; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP25]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT:    [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[TMP4]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND4_FOR_INC9_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[DOTPROMOTED]], [[VECTOR_SCEVCHECK]] ], [ [[DOTPROMOTED]], [[FOR_BODY8_LR_PH]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL3:%.*]] = phi i8 [ [[IND_END2]], [[MIDDLE_BLOCK]] ], [ [[CONV3]], [[VECTOR_SCEVCHECK]] ], [ [[CONV3]], [[FOR_BODY8_LR_PH]] ]
; CHECK-NEXT:    br label [[FOR_BODY8:%.*]]
; CHECK:       for.body8:
; CHECK-NEXT:    [[INC5:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY8]] ]
; CHECK-NEXT:    [[C_04:%.*]] = phi i8 [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[DEC:%.*]], [[FOR_BODY8]] ]
; CHECK-NEXT:    [[INC]] = add i32 [[INC5]], 1
; CHECK-NEXT:    [[DEC]] = add i8 [[C_04]], -1
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[INC]]
; CHECK-NEXT:    store i32 0, ptr [[GEP]], align 4
; CHECK-NEXT:    [[CONV5:%.*]] = zext i8 [[DEC]] to i32
; CHECK-NEXT:    [[CMP6:%.*]] = icmp ult i32 [[TMP0]], [[CONV5]]
; CHECK-NEXT:    br i1 [[CMP6]], label [[FOR_BODY8]], label [[FOR_COND4_FOR_INC9_CRIT_EDGE]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       for.cond4.for.inc9_crit_edge:
; CHECK-NEXT:    [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[FOR_BODY8]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    store i32 [[INC_LCSSA]], ptr @a, align 16
; CHECK-NEXT:    br label [[FOR_INC9]]
; CHECK:       for.inc9:
; CHECK-NEXT:    [[CONV10:%.*]] = and i32 [[STOREMERGE_IN9]], 65535
; CHECK-NEXT:    [[ADD]] = add nuw nsw i32 [[CONV10]], 1
; CHECK-NEXT:    [[CONV1:%.*]] = and i32 [[ADD]], 65472
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CONV1]], 0
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END12_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end12_crit_edge:
; CHECK-NEXT:    [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_INC9]] ]
; CHECK-NEXT:    [[STOREMERGE:%.*]] = trunc i32 [[ADD_LCSSA]] to i16
; CHECK-NEXT:    store i16 [[STOREMERGE]], ptr [[S]], align 2
; CHECK-NEXT:    br label [[FOR_END12]]
; CHECK:       for.end12:
; CHECK-NEXT:    [[CALL13:%.*]] = call i32 (ptr, ...) @foo(ptr nonnull [[S]])
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[S]])
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[I]])
; CHECK-NEXT:    ret i32 0
;
entry:
  %i = alloca i32, align 4
  %s = alloca i16, align 2
  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i) #3
  store i32 0, ptr %i, align 4
  call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %s) #3
  %call = call i32 (ptr, ...) @goo(ptr nonnull %i) #3
  %0 = load i32, ptr %i, align 4
  %storemerge6 = trunc i32 %0 to i16
  store i16 %storemerge6, ptr %s, align 2
  %conv17 = and i32 %0, 65472
  %cmp8 = icmp eq i32 %conv17, 0
  br i1 %cmp8, label %for.body.lr.ph, label %for.end12

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc9
  %storemerge.in9 = phi i32 [ %0, %for.body.lr.ph ], [ %add, %for.inc9 ]
  %conv52 = and i32 %storemerge.in9, 255
  %cmp63 = icmp ult i32 %0, %conv52
  br i1 %cmp63, label %for.body8.lr.ph, label %for.inc9

for.body8.lr.ph:                                  ; preds = %for.body
  %conv3 = trunc i32 %storemerge.in9 to i8
  %.promoted = load i32, ptr @a, align 16
  br label %for.body8

for.body8:                                        ; preds = %for.body8.lr.ph, %for.body8
  %inc5 = phi i32 [ %.promoted, %for.body8.lr.ph ], [ %inc, %for.body8 ]
  %c.04 = phi i8 [ %conv3, %for.body8.lr.ph ], [ %dec, %for.body8 ]
  %inc = add i32 %inc5, 1
  %dec = add i8 %c.04, -1
  %gep = getelementptr inbounds i32, ptr %ptr, i32 %inc
  store i32 0, ptr %gep
  %conv5 = zext i8 %dec to i32
  %cmp6 = icmp ult i32 %0, %conv5
  br i1 %cmp6, label %for.body8, label %for.cond4.for.inc9_crit_edge

for.cond4.for.inc9_crit_edge:                     ; preds = %for.body8
  %inc.lcssa = phi i32 [ %inc, %for.body8 ]
  store i32 %inc.lcssa, ptr @a, align 16
  br label %for.inc9

for.inc9:                                         ; preds = %for.cond4.for.inc9_crit_edge, %for.body
  %conv10 = and i32 %storemerge.in9, 65535
  %add = add nuw nsw i32 %conv10, 1
  %conv1 = and i32 %add, 65472
  %cmp = icmp eq i32 %conv1, 0
  br i1 %cmp, label %for.body, label %for.cond.for.end12_crit_edge

for.cond.for.end12_crit_edge:                     ; preds = %for.inc9
  %add.lcssa = phi i32 [ %add, %for.inc9 ]
  %storemerge = trunc i32 %add.lcssa to i16
  store i16 %storemerge, ptr %s, align 2
  br label %for.end12

for.end12:                                        ; preds = %for.cond.for.end12_crit_edge, %entry
  %call13 = call i32 (ptr, ...) @foo(ptr nonnull %s) #3
  call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %s) #3
  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i) #3
  ret i32 0
}

; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1

declare i32 @goo(...) local_unnamed_addr #2

declare i32 @foo(...) local_unnamed_addr #2

; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1