; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-versioning -S < %s | FileCheck %s -check-prefix=LV

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; For this loop:
;   unsigned index = 0;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index++;
;   }
;
; SCEV is unable to prove that A[2 * i] does not overflow.
;
; Analyzing the IR does not help us because the GEPs are not
; affine AddRecExprs. However, we can turn them into AddRecExprs
; using SCEV Predicates.
;
; Once we have an affine expression we need to add an additional NUSW
; predicate to check that the pointers don't wrap, since the GEPs are not
; inbounds.

; The expression for %mul_ext as analyzed by SCEV is
;    (zext i32 {0,+,2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the SCEV expression:
;    i64 {0,+,2}<%for.body>

define void @f1(i16* noalias %a,
; LV-LABEL: @f1(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP8:%.*]] = or i1 false, [[TMP7]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
; LV-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP9]], [[TMP17]]
; LV-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
                i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; For this loop:
;   unsigned index = n;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index--;
;   }
;
; the SCEV expression for 2 * index is not an AddRecExpr
; (and implicitly not affine). However, we are able to make assumptions
; that will turn the expression into an affine one and continue the
; analysis.
;
; Once we have an affine expression we need to add an additional NUSW
; predicate to check that the pointers don't wrap, since the GEPs are not
; inbounds.
;
; This loop has a negative stride for A, and the nusw flag is required in
; order to properly extend the increment from i32 -4 to i64 -4.
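;
; As an illustration only (the C below is not part of the test input), the
; i32 no-wrap check emitted for this function can be sketched roughly as
; follows; the names here are invented and the actual IR folds the
; comparisons differently:
;
;   unsigned start = 2 * (unsigned)N, prod;   // first value of 2 * index
;   bool mustUseFallbackLoop =
;       __builtin_mul_overflow(2u, (unsigned)(N - 1), &prod) // step * BTC wraps i32
;       || start - prod > start                              // the AddRec underflows i32
;       || (unsigned long long)(N - 1) > 0xFFFFFFFFull;      // BTC doesn't fit in i32
;
; A second, analogous overflow check on the i64 byte offsets guards the
; pointer AddRec for A itself.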

; The expression for %mul_ext as analyzed by SCEV is
;    (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the following SCEV:
;    i64 {zext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>

define void @f2(i16* noalias %a,
; LV-LABEL: @f2(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP5]], [[TMP8]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP12:%.*]] = trunc i64 [[N]] to i31
; LV-NEXT:    [[TMP13:%.*]] = zext i31 [[TMP12]] to i64
; LV-NEXT:    [[TMP14:%.*]] = shl nuw nsw i64 [[TMP13]], 1
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP14]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP15:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP17:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP15]]
; LV-NEXT:    [[TMP18:%.*]] = icmp ugt i8* [[TMP17]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP21:%.*]] = or i1 [[TMP18]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP22:%.*]] = or i1 [[TMP10]], [[TMP21]]
; LV-NEXT:    br i1 [[TMP22]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
                i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; We replicate the tests above, but this time sign extend 2 * index instead
; of zero extending it.
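;
; As an illustrative sketch only (the C below is not part of the test), the
; source shape for @f3 matches @f1 with the arithmetic done on a signed
; index, so 2 * index is sign extended:
;
;   int index = 0;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index++;
;   }
;
; @f4 further down mirrors @f2 in the same way, with index starting at n
; and counting down.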

; The expression for %mul_ext as analyzed by SCEV is
;    i64 (sext i32 {0,+,2}<%for.body> to i64)
; We have added the nssw flag to turn this expression into the following SCEV:
;    i64 {0,+,2}<%for.body>

define void @f3(i16* noalias %a,
; LV-LABEL: @f3(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP2]], 0
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP5]], [[TMP7]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
; LV-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP9]], [[TMP17]]
; LV-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
                i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

define void @f4(i16* noalias %a,
; LV-LABEL: @f4(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP5]], [[TMP8]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP13]]
; LV-NEXT:    [[TMP16:%.*]] = icmp ugt i8* [[TMP15]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP19:%.*]] = or i1 [[TMP16]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
; LV-NEXT:    br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
                i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; The following function is similar to the one above, but has the GEP
; to pointer %A inbounds. The index %mul doesn't have the nsw flag.
; This means that the SCEV expression for %mul can wrap and we need
; a SCEV predicate to continue analysis.
;
; We can still analyze this by adding the required no wrap SCEV predicates.
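;
; In source terms (illustrative only, not part of the test), @f5 is roughly
; the @f4 loop written with a 32-bit subscript, so the GEP itself is
; inbounds while the i32 multiply may still wrap:
;
;   int index = (int)n;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index--;
;   }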

define void @f5(i16* noalias %a,
; LV-LABEL: @f5(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP5]], [[TMP8]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP13]]
; LV-NEXT:    [[TMP16:%.*]] = icmp ugt i8* [[TMP15]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP19:%.*]] = or i1 [[TMP16]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
; LV-NEXT:    br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
                i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2

  %arrayidxA = getelementptr inbounds i16, i16* %a, i32 %mul
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}