; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes='print<access-info>' -aa-pipeline='basic-aa' \
; RUN:     -disable-output %s 2>&1 | FileCheck %s

; For this loop:
;   unsigned index = 0;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index++;
;   }
;
; SCEV is unable to prove that A[2 * index] does not overflow.
;
; Analyzing the IR does not help us because the GEPs are not
; affine AddRecExprs. However, we can turn them into AddRecExprs
; using SCEV Predicates.
;
; Once we have an affine expression we need to add an additional NUSW
; flag to check that the pointers don't wrap since the GEPs are not
; inbounds.

; The expression for %mul_ext as analyzed by SCEV is
;    (zext i32 {0,+,2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the SCEV expression:
;    i64 {0,+,2}<%for.body>

define void @f1(ptr noalias %a, ptr noalias %b, i64 %N) {
; CHECK-LABEL: 'f1'
; CHECK-NEXT:  for.body:
; CHECK-NEXT:    Memory dependences are safe
; CHECK-NEXT:    Dependences:
; CHECK-NEXT:      Forward:
; CHECK-NEXT:          %loadA = load i16, ptr %arrayidxA, align 2 ->
; CHECK-NEXT:          store i16 %add, ptr %arrayidxA, align 2
; CHECK-EMPTY:
; CHECK-NEXT:    Run-time memory checks:
; CHECK-NEXT:    Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT:    Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:    SCEV assumptions:
; CHECK-NEXT:    {0,+,2}<%for.body> Added Flags: <nusw>
; CHECK-NEXT:    {%a,+,4}<%for.body> Added Flags: <nusw>
; CHECK-EMPTY:
; CHECK-NEXT:    Expressions re-written:
; CHECK-NEXT:    [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
; CHECK-NEXT:      ((2 * (zext i32 {0,+,2}<%for.body> to i64)) + %a)
; CHECK-NEXT:      --> {%a,+,4}<%for.body>
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
  %loadA = load i16, ptr %arrayidxA, align 2

  %arrayidxB = getelementptr i16, ptr %b, i64 %ind
  %loadB = load i16, ptr %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, ptr %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; For this loop:
;   unsigned index = n;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index--;
;   }
;
; the SCEV expression for 2 * index is not an AddRecExpr
; (and implicitly not affine). However, we are able to make assumptions
; that will turn the expression into an affine one and continue the
; analysis.
;
; Once we have an affine expression we need to add an additional NUSW
; flag to check that the pointers don't wrap since the GEPs are not
; inbounds.
;
; This loop has a negative stride for A, and the nusw flag is required in
; order to properly extend the increment from i32 -4 to i64 -4.
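;
; To see why the assumption is needed (an illustration with a hypothetical
; value of n, not part of the checked output): if n were 2^31, the i32
; value of 2 * index would be 0 on the first iteration and 0xFFFFFFFE on
; the second, so its zext (4294967294) would not match the -2 produced by
; the affine i64 recurrence. The runtime check generated for the nusw
; assumption fails for such an n, and the rewritten expression is only
; used when the check holds.
;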
; The expression for %mul_ext as analyzed by SCEV is
;    (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the following SCEV:
;    i64 {zext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>

define void @f2(ptr noalias %a, ptr noalias %b, i64 %N) {
; CHECK-LABEL: 'f2'
; CHECK-NEXT:  for.body:
; CHECK-NEXT:    Memory dependences are safe
; CHECK-NEXT:    Dependences:
; CHECK-NEXT:      Forward:
; CHECK-NEXT:          %loadA = load i16, ptr %arrayidxA, align 2 ->
; CHECK-NEXT:          store i16 %add, ptr %arrayidxA, align 2
; CHECK-EMPTY:
; CHECK-NEXT:    Run-time memory checks:
; CHECK-NEXT:    Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT:    Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:    SCEV assumptions:
; CHECK-NEXT:    {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nusw>
; CHECK-NEXT:    {((4 * (zext i31 (trunc i64 %N to i31) to i64)) + %a),+,-4}<%for.body> Added Flags: <nusw>
; CHECK-EMPTY:
; CHECK-NEXT:    Expressions re-written:
; CHECK-NEXT:    [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
; CHECK-NEXT:      ((2 * (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)) + %a)
; CHECK-NEXT:      --> {((4 * (zext i31 (trunc i64 %N to i31) to i64)) + %a),+,-4}<%for.body>
;
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
  %loadA = load i16, ptr %arrayidxA, align 2

  %arrayidxB = getelementptr i16, ptr %b, i64 %ind
  %loadB = load i16, ptr %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, ptr %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; We replicate the tests above, but this time sign-extend 2 * index instead
; of zero-extending it.

; The expression for %mul_ext as analyzed by SCEV is
;    i64 (sext i32 {0,+,2}<%for.body> to i64)
; We have added the nssw flag to turn this expression into the following SCEV:
;    i64 {0,+,2}<%for.body>

define void @f3(ptr noalias %a, ptr noalias %b, i64 %N) {
; CHECK-LABEL: 'f3'
; CHECK-NEXT:  for.body:
; CHECK-NEXT:    Memory dependences are safe
; CHECK-NEXT:    Dependences:
; CHECK-NEXT:      Forward:
; CHECK-NEXT:          %loadA = load i16, ptr %arrayidxA, align 2 ->
; CHECK-NEXT:          store i16 %add, ptr %arrayidxA, align 2
; CHECK-EMPTY:
; CHECK-NEXT:    Run-time memory checks:
; CHECK-NEXT:    Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT:    Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:    SCEV assumptions:
; CHECK-NEXT:    {0,+,2}<%for.body> Added Flags: <nssw>
; CHECK-NEXT:    {%a,+,4}<%for.body> Added Flags: <nssw>
; CHECK-EMPTY:
; CHECK-NEXT:    Expressions re-written:
; CHECK-NEXT:    [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
; CHECK-NEXT:      ((2 * (sext i32 {0,+,2}<%for.body> to i64)) + %a)
; CHECK-NEXT:      --> {%a,+,4}<%for.body>
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
  %loadA = load i16, ptr %arrayidxA, align 2

  %arrayidxB = getelementptr i16, ptr %b, i64 %ind
  %loadB = load i16, ptr %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, ptr %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; The expression for %mul_ext as analyzed by SCEV is
;    i64 (sext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
; We have added the nssw flag to turn this expression into the following SCEV:
;    i64 {sext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>

define void @f4(ptr noalias %a, ptr noalias %b, i64 %N) {
; CHECK-LABEL: 'f4'
; CHECK-NEXT:  for.body:
; CHECK-NEXT:    Memory dependences are safe
; CHECK-NEXT:    Dependences:
; CHECK-NEXT:      Forward:
; CHECK-NEXT:          %loadA = load i16, ptr %arrayidxA, align 2 ->
; CHECK-NEXT:          store i16 %add, ptr %arrayidxA, align 2
; CHECK-EMPTY:
; CHECK-NEXT:    Run-time memory checks:
; CHECK-NEXT:    Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT:    Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:    SCEV assumptions:
; CHECK-NEXT:    {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nssw>
; CHECK-NEXT:    {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64)) + %a),+,-4}<%for.body> Added Flags: <nssw>
; CHECK-EMPTY:
; CHECK-NEXT:    Expressions re-written:
; CHECK-NEXT:    [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
; CHECK-NEXT:      ((2 * (sext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)) + %a)
; CHECK-NEXT:      --> {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64)) + %a),+,-4}<%for.body>
;
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
  %loadA = load i16, ptr %arrayidxA, align 2

  %arrayidxB = getelementptr i16, ptr %b, i64 %ind
  %loadB = load i16, ptr %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, ptr %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; The following function is similar to the one above, but has the GEP
; to pointer %a inbounds. The index %mul doesn't have the nsw flag.
; This means that the SCEV expression for %mul can wrap and we need
; a SCEV predicate to continue analysis.
;
; We can still analyze this by adding the required no-wrap SCEV predicates.
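;
; Because the GEP is inbounds, no extra predicate is needed for the pointer
; AddRec itself; only the i32 index recurrence gets an added no-wrap
; assumption. This is why the SCEV assumptions list below for @f5 has a
; single entry, while @f4 above needs two.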
define void @f5(ptr noalias %a, ptr noalias %b, i64 %N) {
; CHECK-LABEL: 'f5'
; CHECK-NEXT:  for.body:
; CHECK-NEXT:    Memory dependences are safe
; CHECK-NEXT:    Dependences:
; CHECK-NEXT:      Forward:
; CHECK-NEXT:          %loadA = load i16, ptr %arrayidxA, align 2 ->
; CHECK-NEXT:          store i16 %add, ptr %arrayidxA, align 2
; CHECK-EMPTY:
; CHECK-NEXT:    Run-time memory checks:
; CHECK-NEXT:    Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT:    Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:    SCEV assumptions:
; CHECK-NEXT:    {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nssw>
; CHECK-EMPTY:
; CHECK-NEXT:    Expressions re-written:
; CHECK-NEXT:    [PSE] %arrayidxA = getelementptr inbounds i16, ptr %a, i32 %mul:
; CHECK-NEXT:      ((2 * (sext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)) + %a)
; CHECK-NEXT:      --> {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64)) + %a),+,-4}<%for.body>
;
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2

  %arrayidxA = getelementptr inbounds i16, ptr %a, i32 %mul
  %loadA = load i16, ptr %arrayidxA, align 2

  %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %ind
  %loadB = load i16, ptr %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, ptr %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}
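
; A note on how these "Added Flags" assumptions are consumed (a sketch of
; the idea, not checked output): a transform that uses
; PredicatedScalarEvolution, such as LoopVectorize, may only rely on the
; rewritten AddRecs if it versions the loop on a runtime check that the
; assumed no-wrap behavior actually holds. In spirit, for the first
; assumption of @f1 (with n an int64_t, as in the IR):
;
;   /* hypothetical guard: for the increasing stride of @f1 it suffices
;      to re-check the last value of 2 * index */
;   uint32_t last32 = 2u * (uint32_t)(n - 1); /* i32 arithmetic, may wrap */
;   if (last32 != 2 * (n - 1))                /* i64 arithmetic */
;     goto scalar_loop;                       /* assumption failed */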