; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -S %s | FileCheck %s

target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
target triple = "riscv64-unknown-linux-gnu"

; %dead.gep in %then and the recurrence %xor1315 have no users.
define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 {
; CHECK-LABEL: define void @block_with_dead_inst_1(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 8
; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP18:%.*]] = mul i32 [[TMP17]], 8
; CHECK-NEXT: [[TMP19:%.*]] = sub i32 [[TMP18]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 1, i32 [[TMP19]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 8 x i16> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ zeroinitializer, %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP22:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP23:%.*]] = mul i32 [[TMP22]], 8
; CHECK-NEXT: [[TMP24:%.*]] = sub i32 [[TMP23]], 1
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 8 x i16> zeroinitializer, i32 [[TMP24]]
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
; CHECK: [[THEN]]:
; CHECK-NEXT: [[DEAD_GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[TMP25]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop.header

loop.header: ; preds = %cond.end7, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %xor1315 = phi i16 [ 1, %entry ], [ %xor, %loop.latch ]
  %xor = xor i16 0, 0
  %gep = getelementptr i16, ptr %src, i64 %iv
  %l = load i16, ptr %gep, align 2
  %c = icmp eq i16 %l, 0
  br i1 %c, label %then, label %loop.latch

then:
  %dead.gep = getelementptr i64, ptr %src, i64 %iv
  br label %loop.latch

loop.latch:
  store i16 %xor, ptr %gep
  %iv.next = add nsw i64 %iv, 3
  %1 = icmp eq i64 %iv.next, %N
  br i1 %1, label %exit, label %loop.header

exit:
  ret void
}

; Same as @block_with_dead_inst_1, but with the branch condition inverted (the
; dead GEP is in %else) and a constant trip count.
define void @block_with_dead_inst_2(ptr %src) #0 {
; CHECK-LABEL: define void @block_with_dead_inst_2(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP15:%.*]] = mul i32 [[TMP14]], 4
; CHECK-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 4 x i16> poison, i16 0, i32 [[TMP16]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i16> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ zeroinitializer, %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP20:%.*]] = mul i32 [[TMP19]], 4
; CHECK-NEXT: [[TMP21:%.*]] = sub i32 [[TMP20]], 1
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 4 x i16> zeroinitializer, i32 [[TMP21]]
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: [[DEAD_GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop.header

loop.header: ; preds = %cond.end7, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %xor1315 = phi i16 [ 0, %entry ], [ %xor, %loop.latch ]
  %xor = xor i16 0, 0
  %gep = getelementptr i16, ptr %src, i64 %iv
  %l = load i16, ptr %gep, align 2
  %c = icmp eq i16 %l, 0
  br i1 %c, label %loop.latch, label %else

else:
  %dead.gep = getelementptr i64, ptr %src, i64 %iv
  br label %loop.latch

loop.latch:
  store i16 %xor, ptr %gep
  %iv.next = add nsw i64 %iv, 3
  %ec = icmp eq i64 %iv.next, 1000
  br i1 %ec, label %exit, label %loop.header

exit:
  ret void
}

; Both %then and %else contain only dead GEPs.
define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 {
; CHECK-LABEL: define void @multiple_blocks_with_dead_insts_3(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP15:%.*]] = mul i32 [[TMP14]], 4
; CHECK-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 4 x i16> poison, i16 0, i32 [[TMP16]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i16> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ zeroinitializer, %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP20:%.*]] = mul i32 [[TMP19]], 4
; CHECK-NEXT: [[TMP21:%.*]] = sub i32 [[TMP20]], 1
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 4 x i16> zeroinitializer, i32 [[TMP21]]
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]]
; CHECK: [[THEN]]:
; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop.header

loop.header: ; preds = %cond.end7, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %xor1315 = phi i16 [ 0, %entry ], [ %xor, %loop.latch ]
  %xor = xor i16 0, 0
  %gep = getelementptr i16, ptr %src, i64 %iv
  %l = load i16, ptr %gep, align 2
  %c = icmp eq i16 %l, 0
  br i1 %c, label %then, label %else

then:
  %dead.gep.1 = getelementptr i64, ptr %src, i64 %iv
  br label %loop.latch

else:
  %dead.gep.2 = getelementptr i64, ptr %src, i64 %iv
  br label %loop.latch

loop.latch:
  store i16 %xor, ptr %gep
  %iv.next = add nsw i64 %iv, 3
  %ec = icmp eq i64 %iv.next, 1000
  br i1 %ec, label %exit, label %loop.header

exit:
  ret void
}

; Like @multiple_blocks_with_dead_insts_3, but %then reaches its dead GEP through
; the extra block %then.1 and the trip count is the runtime value %N.
define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 {
; CHECK-LABEL: define void @multiple_blocks_with_dead_insts_4(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 8
; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP18:%.*]] = mul i32 [[TMP17]], 8
; CHECK-NEXT: [[TMP19:%.*]] = sub i32 [[TMP18]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 1, i32 [[TMP19]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 8 x i16> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ zeroinitializer, %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP22:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP23:%.*]] = mul i32 [[TMP22]], 8
; CHECK-NEXT: [[TMP24:%.*]] = sub i32 [[TMP23]], 1
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 8 x i16> zeroinitializer, i32 [[TMP24]]
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]]
; CHECK: [[THEN]]:
; CHECK-NEXT: br label %[[THEN_1:.*]]
; CHECK: [[THEN_1]]:
; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop.header

loop.header: ; preds = %cond.end7, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %xor1315 = phi i16 [ 1, %entry ], [ %xor, %loop.latch ]
  %xor = xor i16 0, 0
  %gep = getelementptr i16, ptr %src, i64 %iv
  %l = load i16, ptr %gep, align 2
  %c = icmp eq i16 %l, 0
  br i1 %c, label %then, label %else

then:
  br label %then.1

then.1:
  %dead.gep.1 = getelementptr i64, ptr %src, i64 %iv
  br label %loop.latch

else:
  %dead.gep.2 = getelementptr i64, ptr %src, i64 %iv
  br label %loop.latch

loop.latch:
  store i16 %xor, ptr %gep
  %iv.next = add nsw i64 %iv, 3
  %ec = icmp eq i64 %iv.next, %N
  br i1 %ec, label %exit, label %loop.header

exit:
  ret void
}

; Both branches go through an extra block (%then.1 and %else.2) that contains
; only a dead GEP.
define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 {
; CHECK-LABEL: define void @multiple_blocks_with_dead_inst_multiple_successors_5(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP15:%.*]] = mul i32 [[TMP14]], 4
; CHECK-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 4 x i16> poison, i16 1, i32 [[TMP16]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i16> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ zeroinitializer, %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP20:%.*]] = mul i32 [[TMP19]], 4
; CHECK-NEXT: [[TMP21:%.*]] = sub i32 [[TMP20]], 1
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 4 x i16> zeroinitializer, i32 [[TMP21]]
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]]
; CHECK: [[THEN]]:
; CHECK-NEXT: br label %[[THEN_1:.*]]
; CHECK: [[THEN_1]]:
; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: br label %[[ELSE_2:.*]]
; CHECK: [[ELSE_2]]:
; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop.header

loop.header: ; preds = %cond.end7, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %xor1315 = phi i16 [ 1, %entry ], [ %xor, %loop.latch ]
  %xor = xor i16 0, 0
  %gep = getelementptr i16, ptr %src, i64 %iv
  %l = load i16, ptr %gep, align 2
  %c = icmp eq i16 %l, 0
  br i1 %c, label %then, label %else

then:
  br label %then.1

then.1:
  %dead.gep.1 = getelementptr i64, ptr %src, i64 %iv
  br label %loop.latch

else:
  br label %else.2

else.2:
  %dead.gep.2 = getelementptr i64, ptr %src, i64 %iv
  br label %loop.latch

loop.latch:
  store i16 %xor, ptr %gep
  %iv.next = add nsw i64 %iv, 3
  %ec = icmp eq i64 %iv.next, 1000
  br i1 %ec, label %exit, label %loop.header

exit:
  ret void
}

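; Same as @multiple_blocks_with_dead_inst_multiple_successors_5, except that the
; trip count is the runtime value %N and the branch in %then is controlled by the
; loop-invariant argument %ic, so %then may also branch directly to %else.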
define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %ic, i64 %N) #0 {
; CHECK-LABEL: define void @multiple_blocks_with_dead_inst_multiple_successors_6(
; CHECK-SAME: ptr [[SRC:%.*]], i1 [[IC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 8
; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP18:%.*]] = mul i32 [[TMP17]], 8
; CHECK-NEXT: [[TMP19:%.*]] = sub i32 [[TMP18]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 1, i32 [[TMP19]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 8 x i16> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ zeroinitializer, %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP22:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP23:%.*]] = mul i32 [[TMP22]], 8
; CHECK-NEXT: [[TMP24:%.*]] = sub i32 [[TMP23]], 1
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 8 x i16> zeroinitializer, i32 [[TMP24]]
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]]
; CHECK: [[THEN]]:
; CHECK-NEXT: br i1 [[IC]], label %[[THEN_1:.*]], label %[[ELSE]]
; CHECK: [[THEN_1]]:
; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: br label %[[ELSE_2:.*]]
; CHECK: [[ELSE_2]]:
; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop.header

loop.header: ; preds = %cond.end7, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %xor1315 = phi i16 [ 1, %entry ], [ %xor, %loop.latch ]
  %xor = xor i16 0, 0
  %gep = getelementptr i16, ptr %src, i64 %iv
  %l = load i16, ptr %gep, align 2
  %c = icmp eq i16 %l, 0
  br i1 %c, label %then, label %else

then:
  br i1 %ic, label %then.1, label %else

then.1:
  %dead.gep.1 = getelementptr i64, ptr %src, i64 %iv
  br label %loop.latch

else:
  br label %else.2

else.2:
  %dead.gep.2 = getelementptr i64, ptr %src, i64 %iv
  br label %loop.latch

loop.latch:
  store i16 %xor, ptr %gep
  %iv.next = add nsw i64 %iv, 3
  %ec = icmp eq i64 %iv.next, %N
  br i1 %ec, label %exit, label %loop.header

exit:
  ret void
}

; %then is empty; it only determines the incoming value (99) of the phi %p in the
; latch. The CHECK lines show the branch being flattened into a select (PREDPHI)
; in the vector body.
define void @empty_block_with_phi_1(ptr %src, i64 %N) #0 {
; CHECK-LABEL: define void @empty_block_with_phi_1(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP7:%.*]] = mul i32 [[TMP6]], 8
; CHECK-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 8 x i32> poison, i32 1, i32 [[TMP8]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 8 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ zeroinitializer, %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[TMP10]], i32 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP11]], align 2
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP12]], <vscale x 8 x i16> splat (i16 99), <vscale x 8 x i16> [[WIDE_LOAD]]
; CHECK-NEXT: store <vscale x 8 x i16> [[PREDPHI]], ptr [[TMP11]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP15:%.*]] = mul i32 [[TMP14]], 8
; CHECK-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 8 x i32> zeroinitializer, i32 [[TMP16]]
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i32 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
; CHECK: [[THEN]]:
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: [[P:%.*]] = phi i16 [ [[L]], %[[LOOP_HEADER]] ], [ 99, %[[THEN]] ]
; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[TMP17]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop.header

loop.header: ; preds = %cond.end7, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %xor1315 = phi i32 [ 1, %entry ], [ %xor, %loop.latch ]
  %xor = xor i32 0, 0
  %gep = getelementptr i16, ptr %src, i64 %iv
  %l = load i16, ptr %gep, align 2
  %c = icmp eq i16 %l, 0
  br i1 %c, label %then, label %loop.latch

then:
  br label %loop.latch

loop.latch:
  %p = phi i16 [ %l, %loop.header ], [ 99, %then ]
  store i16 %p, ptr %gep
  %iv.next = add nsw i64 %iv, 1
  %1 = icmp eq i64 %iv.next, %N
  br i1 %1, label %exit, label %loop.header

exit:
  ret void
}

; Same as @empty_block_with_phi_1, but with the branch condition inverted (the
; empty block is %else).
define void @empty_block_with_phi_2(ptr %src, i64 %N) #0 {
; CHECK-LABEL: define void @empty_block_with_phi_2(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP7:%.*]] = mul i32 [[TMP6]], 8
; CHECK-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 8 x i32> poison, i32 1, i32 [[TMP8]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 8 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ zeroinitializer, %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[TMP10]], i32 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP11]], align 2
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP13:%.*]] = xor <vscale x 8 x i1> [[TMP12]], splat (i1 true)
; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i16> splat (i16 99), <vscale x 8 x i16> [[WIDE_LOAD]]
; CHECK-NEXT: store <vscale x 8 x i16> [[PREDPHI]], ptr [[TMP11]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP16:%.*]] = mul i32 [[TMP15]], 8
; CHECK-NEXT: [[TMP17:%.*]] = sub i32 [[TMP16]], 1
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 8 x i32> zeroinitializer, i32 [[TMP17]]
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i32 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: [[P:%.*]] = phi i16 [ [[L]], %[[LOOP_HEADER]] ], [ 99, %[[ELSE]] ]
; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[TMP18]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop.header

loop.header: ; preds = %cond.end7, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %xor1315 = phi i32 [ 1, %entry ], [ %xor, %loop.latch ]
  %xor = xor i32 0, 0
  %gep = getelementptr i16, ptr %src, i64 %iv
  %l = load i16, ptr %gep, align 2
  %c = icmp eq i16 %l, 0
  br i1 %c, label %loop.latch, label %else

else:
  br label %loop.latch

loop.latch:
  %p = phi i16 [ %l, %loop.header ], [ 99, %else ]
  store i16 %p, ptr %gep
  %iv.next = add nsw i64 %iv, 1
  %1 = icmp eq i64 %iv.next, %N
  br i1 %1, label %exit, label %loop.header

exit:
  ret void
}

; Test case for https://github.com/llvm/llvm-project/issues/100591.
; %l.dead is never used, so the conditional load from %src is dead and the vector
; body only stores 0 to %dst.
define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
; CHECK-LABEL: define void @dead_load_in_block(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i8 [[N:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[N_EXT:%.*]] = zext i8 [[N]] to i64
; CHECK-NEXT: [[UMIN7:%.*]] = call i64 @llvm.umin.i64(i64 [[N_EXT]], i64 1)
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[N_EXT]], [[UMIN7]]
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[UMIN7]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 1
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.umax.i64(i64 40, i64 [[TMP5]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP6]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[N_EXT]], i64 1)
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N_EXT]], [[UMIN]]
; CHECK-NEXT: [[TMP8:%.*]] = udiv i64 [[TMP7]], 3
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[UMIN]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 12
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 4
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP12:%.*]] = shl i64 [[X]], 2
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], 4
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP13]]
; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[SRC]], i64 4
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP1]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: [[BOUND04:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP3]]
; CHECK-NEXT: [[BOUND15:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT6:%.*]] = and i1 [[BOUND04]], [[BOUND15]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT6]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP15]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 2
; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 2 x i64> [[TMP18]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP20]]
; CHECK-NEXT: [[TMP23:%.*]] = mul i64 3, [[TMP17]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP23]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 2 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> zeroinitializer, <vscale x 2 x ptr> [[TMP24]], i32 4, <vscale x 2 x i1> splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META21:![0-9]+]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP17]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_MEMCHECK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[L_0:%.*]] = load i32, ptr [[SRC]], align 4
; CHECK-NEXT: [[C_0:%.*]] = icmp eq i32 [[L_0]], 0
; CHECK-NEXT: br i1 [[C_0]], label %[[LOOP_LATCH]], label %[[THEN:.*]]
; CHECK: [[THEN]]:
; CHECK-NEXT: [[GEP_SRC_X:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[X]]
; CHECK-NEXT: [[L_DEAD:%.*]] = load i32, ptr [[GEP_SRC_X]], align 4
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i32, ptr [[DST]], i64 [[IV]]
; CHECK-NEXT: store i32 0, ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], [[N_EXT]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  %N.ext = zext i8 %N to i64
  br label %loop.header

loop.header:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %l.0 = load i32, ptr %src, align 4
  %c.0 = icmp eq i32 %l.0, 0
  br i1 %c.0, label %loop.latch, label %then

then:
  %gep.src.x = getelementptr i32, ptr %src, i64 %x
  %l.dead = load i32, ptr %gep.src.x, align 4
  br label %loop.latch

loop.latch:
  %gep.dst = getelementptr i32, ptr %dst, i64 %iv
  store i32 0, ptr %gep.dst, align 4
  %iv.next = add i64 %iv, 3
  %cmp = icmp ult i64 %iv, %N.ext
  br i1 %cmp, label %loop.header, label %exit

exit:
  ret void
}

attributes #0 = { "target-features"="+64bit,+v" }
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]}
; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}
; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META2]], [[META1]]}
; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META2]], [[META1]]}
; CHECK: [[META18]] = !{[[META19:![0-9]+]]}
; CHECK: [[META19]] = distinct !{[[META19]], [[META20:![0-9]+]]}
; CHECK: [[META20]] = distinct !{[[META20]], !"LVerDomain"}
; CHECK: [[META21]] = !{[[META22:![0-9]+]], [[META23:![0-9]+]]}
; CHECK: [[META22]] = distinct !{[[META22]], [[META20]]}
; CHECK: [[META23]] = distinct !{[[META23]], [[META20]]}
; CHECK: [[LOOP24]] = distinct !{[[LOOP24]], [[META1]], [[META2]]}
; CHECK: [[LOOP25]] = distinct !{[[LOOP25]], [[META1]]}
;.