; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -S | FileCheck --check-prefixes=CHECK,DEFAULT %s
; RUN: opt < %s -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -lv-strided-pointer-ivs=true -S | FileCheck --check-prefixes=CHECK,STRIDED %s
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"


; Function Attrs: nofree norecurse nounwind
define void @a(ptr readnone %b) {
; CHECK-LABEL: @a(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B1:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT:    [[CMP_NOT4:%.*]] = icmp eq ptr [[B]], null
; CHECK-NEXT:    br i1 [[CMP_NOT4]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = sub i64 0, [[B1]]
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[N_VEC]], -1
; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr i8, ptr null, i64 [[TMP1]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE10:%.*]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = sub i64 0, [[INDEX]]
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i8, ptr null, i64 [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i64 -1
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 -3
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP5]], align 1
; CHECK-NEXT:    [[REVERSE:%.*]] = shufflevector <4 x i8> [[WIDE_LOAD]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq <4 x i8> [[REVERSE]], zeroinitializer
; CHECK-NEXT:    [[TMP7:%.*]] = xor <4 x i1> [[TMP6]], splat (i1 true)
; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <4 x i1> [[TMP7]], i32 0
; CHECK-NEXT:    br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK:       pred.store.if:
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i64 -1
; CHECK-NEXT:    store i8 95, ptr [[TMP9]], align 1
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE]]
; CHECK:       pred.store.continue:
; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <4 x i1> [[TMP7]], i32 1
; CHECK-NEXT:    br i1 [[TMP10]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
; CHECK:       pred.store.if5:
; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], -1
; CHECK-NEXT:    [[NEXT_GEP2:%.*]] = getelementptr i8, ptr null, i64 [[TMP11]]
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP2]], i64 -1
; CHECK-NEXT:    store i8 95, ptr [[TMP12]], align 1
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE6]]
; CHECK:       pred.store.continue6:
; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <4 x i1> [[TMP7]], i32 2
; CHECK-NEXT:    br i1 [[TMP13]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
; CHECK:       pred.store.if7:
; CHECK-NEXT:    [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], -2
; CHECK-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr i8, ptr null, i64 [[TMP14]]
; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP3]], i64 -1
; CHECK-NEXT:    store i8 95, ptr [[TMP15]], align 1
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE8]]
; CHECK:       pred.store.continue8:
; CHECK-NEXT:    [[TMP16:%.*]] = extractelement <4 x i1> [[TMP7]], i32 3
; CHECK-NEXT:    br i1 [[TMP16]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10]]
; CHECK:       pred.store.if9:
; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[OFFSET_IDX]], -3
; CHECK-NEXT:    [[NEXT_GEP4:%.*]] = getelementptr i8, ptr null, i64 [[TMP17]]
; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP4]], i64 -1
; CHECK-NEXT:    store i8 95, ptr [[TMP18]], align 1
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE10]]
; CHECK:       pred.store.continue10:
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ null, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[C_05:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[C_05]], i64 -1
; CHECK-NEXT:    [[TMP20:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1
; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP20]], 0
; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[IF_END]], label [[IF_THEN:%.*]]
; CHECK:       if.then:
; CHECK-NEXT:    store i8 95, ptr [[INCDEC_PTR]], align 1
; CHECK-NEXT:    br label [[IF_END]]
; CHECK:       if.end:
; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp eq ptr [[INCDEC_PTR]], [[B]]
; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;

entry:
  %cmp.not4 = icmp eq ptr %b, null
  br i1 %cmp.not4, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %if.end
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %if.end
  %c.05 = phi ptr [ %incdec.ptr, %if.end ], [ null, %for.body.preheader ]
  %incdec.ptr = getelementptr inbounds i8, ptr %c.05, i64 -1
  %0 = load i8, ptr %incdec.ptr, align 1
  %tobool.not = icmp eq i8 %0, 0
  br i1 %tobool.not, label %if.end, label %if.then

if.then:                                          ; preds = %for.body
  store i8 95, ptr %incdec.ptr, align 1
  br label %if.end

if.end:                                           ; preds = %for.body, %if.then
  %cmp.not = icmp eq ptr %incdec.ptr, %b
  br i1 %cmp.not, label %for.cond.cleanup.loopexit, label %for.body
}

; In the test below the pointer phi %ptr.iv.2 is used:
;  1. As a uniform address for the load, and
;  2. As a non-uniform operand of the getelementptr whose result is stored.
;     This requires the vector value.
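;
; A rough C-level sketch of the loop below (illustrative only; p1, p2, i and the
; start_1/start_2 names are not part of the test and simply stand for the
; %start.1/%start.2 arguments):
;
;   char **p1 = start_1;   // corresponds to %start.1 / %ptr.iv.1
;   char  *p2 = start_2;   // corresponds to %start.2 / %ptr.iv.2
;   for (uint64_t i = 0; i != N; i++) {
;     p1[i] = p2 + i + 1;  // non-uniform use of the pointer IV: needs the vector of pointers
;     p2[i] += 1;          // uniform address for the load and the store
;   }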
define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias %start.2, i64 %N) {
; CHECK-LABEL: @pointer_induction_used_as_vector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP0:%.*]] = mul i64 [[N_VEC]], 8
; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr i8, ptr [[START_1:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[IND_END2:%.*]] = getelementptr i8, ptr [[START_2:%.*]], i64 [[N_VEC]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, <4 x ptr> [[VECTOR_GEP]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
; CHECK-NEXT:    store <4 x ptr> [[TMP2]], ptr [[TMP3]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x ptr> [[VECTOR_GEP]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP5]], align 1
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i8> [[WIDE_LOAD]], splat (i8 1)
; CHECK-NEXT:    store <4 x i8> [[TMP6]], ptr [[TMP5]], align 1
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 4
; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START_1]], [[ENTRY]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL3:%.*]] = phi ptr [ [[IND_END2]], [[MIDDLE_BLOCK]] ], [ [[START_2]], [[ENTRY]] ]
; CHECK-NEXT:    br label [[LOOP_BODY:%.*]]
; CHECK:       loop.body:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT:    [[PTR_IV_1:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[PTR_IV_1_NEXT:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT:    [[PTR_IV_2:%.*]] = phi ptr [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[PTR_IV_2_NEXT:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT:    [[PTR_IV_1_NEXT]] = getelementptr inbounds ptr, ptr [[PTR_IV_1]], i64 1
; CHECK-NEXT:    [[PTR_IV_2_NEXT]] = getelementptr inbounds i8, ptr [[PTR_IV_2]], i64 1
; CHECK-NEXT:    store ptr [[PTR_IV_2_NEXT]], ptr [[PTR_IV_1]], align 8
; CHECK-NEXT:    [[LV:%.*]] = load i8, ptr [[PTR_IV_2]], align 1
; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[LV]], 1
; CHECK-NEXT:    store i8 [[ADD]], ptr [[PTR_IV_2]], align 1
; CHECK-NEXT:    [[IV_NEXT]] = add nuw i64 [[IV]], 1
; CHECK-NEXT:    [[C:%.*]] = icmp ne i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[C]], label [[LOOP_BODY]], label [[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;

entry:
  br label %loop.body

loop.body:                                        ; preds = %loop.body, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.body ]
  %ptr.iv.1 = phi ptr [ %start.1, %entry ], [ %ptr.iv.1.next, %loop.body ]
  %ptr.iv.2 = phi ptr [ %start.2, %entry ], [ %ptr.iv.2.next, %loop.body ]
  %ptr.iv.1.next = getelementptr inbounds ptr, ptr %ptr.iv.1, i64 1
  %ptr.iv.2.next = getelementptr inbounds i8, ptr %ptr.iv.2, i64 1
  store ptr %ptr.iv.2.next, ptr %ptr.iv.1, align 8
  %lv = load i8, ptr %ptr.iv.2, align 1
  %add = add i8 %lv, 1
  store i8 %add, ptr %ptr.iv.2, align 1
  %iv.next = add nuw i64 %iv, 1
  %c = icmp ne i64 %iv.next, %N
  br i1 %c, label %loop.body, label %exit

exit:                                             ; preds = %loop.body
  ret void
}

; Test the vector expansion of a non-constant stride pointer IV.
define void @non_constant_vector_expansion(i32 %0, ptr %call) {
; DEFAULT-LABEL: @non_constant_vector_expansion(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[MUL:%.*]] = shl i32 [[TMP0:%.*]], 1
; DEFAULT-NEXT:    br label [[FOR_COND:%.*]]
; DEFAULT:       for.cond:
; DEFAULT-NEXT:    [[TMP1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_COND]] ]
; DEFAULT-NEXT:    [[P_0:%.*]] = phi ptr [ null, [[ENTRY]] ], [ [[ADD_PTR:%.*]], [[FOR_COND]] ]
; DEFAULT-NEXT:    [[ADD_PTR]] = getelementptr i8, ptr [[P_0]], i32 [[MUL]]
; DEFAULT-NEXT:    [[ARRAYIDX:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[TMP1]]
; DEFAULT-NEXT:    store ptr [[P_0]], ptr [[ARRAYIDX]], align 4
; DEFAULT-NEXT:    [[INC]] = add i32 [[TMP1]], 1
; DEFAULT-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP1]], 100
; DEFAULT-NEXT:    br i1 [[TOBOOL_NOT]], label [[FOR_END:%.*]], label [[FOR_COND]]
; DEFAULT:       for.end:
; DEFAULT-NEXT:    ret void
;
; STRIDED-LABEL: @non_constant_vector_expansion(
; STRIDED-NEXT:  entry:
; STRIDED-NEXT:    [[MUL:%.*]] = shl i32 [[TMP0:%.*]], 1
; STRIDED-NEXT:    [[TMP1:%.*]] = sext i32 [[MUL]] to i64
; STRIDED-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; STRIDED:       vector.ph:
; STRIDED-NEXT:    [[TMP2:%.*]] = mul i64 100, [[TMP1]]
; STRIDED-NEXT:    [[IND_END:%.*]] = getelementptr i8, ptr null, i64 [[TMP2]]
; STRIDED-NEXT:    br label [[VECTOR_BODY:%.*]]
; STRIDED:       vector.body:
; STRIDED-NEXT:    [[POINTER_PHI:%.*]] = phi ptr [ null, [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP1]], 4
; STRIDED-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP1]], i64 0
; STRIDED-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; STRIDED-NEXT:    [[TMP4:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[DOTSPLAT]]
; STRIDED-NEXT:    [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> [[TMP4]]
; STRIDED-NEXT:    [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
; STRIDED-NEXT:    [[TMP5:%.*]] = add i32 [[OFFSET_IDX]], 0
; STRIDED-NEXT:    [[TMP6:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[TMP5]]
; STRIDED-NEXT:    [[TMP7:%.*]] = getelementptr ptr, ptr [[TMP6]], i32 0
; STRIDED-NEXT:    store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP7]], align 4
; STRIDED-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; STRIDED-NEXT:    [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]]
; STRIDED-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; STRIDED-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; STRIDED:       middle.block:
; STRIDED-NEXT:    br i1 false, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; STRIDED:       scalar.ph:
; STRIDED-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ 100, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; STRIDED-NEXT:    [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ null, [[ENTRY]] ]
; STRIDED-NEXT:    br label [[FOR_COND:%.*]]
; STRIDED:       for.cond:
; STRIDED-NEXT:    [[TMP9:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ]
; STRIDED-NEXT:    [[P_0:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_COND]] ]
; STRIDED-NEXT:    [[ADD_PTR]] = getelementptr i8, ptr [[P_0]], i32 [[MUL]]
; STRIDED-NEXT:    [[ARRAYIDX:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP9]]
; STRIDED-NEXT:    store ptr [[P_0]], ptr [[ARRAYIDX]], align 4
; STRIDED-NEXT:    [[INC]] = add i32 [[TMP9]], 1
; STRIDED-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP9]], 100
; STRIDED-NEXT:    br i1 [[TOBOOL_NOT]], label [[FOR_END]], label [[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
; STRIDED:       for.end:
; STRIDED-NEXT:    ret void
;
entry:
  %mul = shl i32 %0, 1
  br label %for.cond

for.cond:                                         ; preds = %for.cond, %entry
  %1 = phi i32 [ 0, %entry ], [ %inc, %for.cond ]
  %p.0 = phi ptr [ null, %entry ], [ %add.ptr, %for.cond ]
  %add.ptr = getelementptr i8, ptr %p.0, i32 %mul
  %arrayidx = getelementptr ptr, ptr %call, i32 %1
  store ptr %p.0, ptr %arrayidx, align 4
  %inc = add i32 %1, 1
  %tobool.not = icmp eq i32 %1, 100
  br i1 %tobool.not, label %for.end, label %for.cond

for.end:                                          ; preds = %for.cond
  ret void
}
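
; For reference, a rough C-level sketch of @non_constant_vector_expansion above
; (illustrative only; "arg", "out" and "stride" are not part of the test and
; stand for the i32 argument, the ptr %call argument, and the shl result %mul):
;
;   char **out = call;
;   char *p = 0;                  // %p.0 starts at null
;   int stride = arg * 2;         // %mul = shl i32 %0, 1
;   for (int i = 0; ; i++) {
;     out[i] = p;                 // store the current pointer IV value
;     p += stride;                // pointer IV advanced by a non-constant stride
;     if (i == 100)
;       break;
;   }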