; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -S -loop-vectorize < %s -o - | FileCheck %s

; The case will do aggressive interleave on PowerPC, resulting in a lot of memory checks.
; (On the A2, always unroll aggressively. In fact, if aggressive interleaving is enabled,
; similar issues may occur on other targets).
; Interleaving should also be restricted by the threshold of memory checks similar to VF.
; (e.g., runtime-memory-check-threshold, default 8).

; CHECK-LABEL: @eddy_diff_caleddy_
; CHECK: vector.memcheck

define fastcc void @eddy_diff_caleddy_(i64* %wet_cl, i64 %0, i32 %ncol.cast.val) {
entry:
  ; Precompute the 12 row offsets (%0 .. 12*%0) used by the stores below; the
  ; loop then writes to 12 strided address streams, which is what forces the
  ; vectorizer to emit many runtime memory checks.
  %trip.count = add nuw i32 %ncol.cast.val, 1
  %wide.trip.count = zext i32 %ncol.cast.val to i64
  %1 = shl i64 %0, 1
  %2 = mul i64 %0, 3
  %3 = shl i64 %0, 2
  %4 = mul i64 %0, 5
  %5 = mul i64 %0, 6
  %6 = mul i64 %0, 7
  %7 = shl i64 %0, 3
  %8 = mul i64 %0, 9
  %9 = mul i64 %0, 10
  %10 = mul i64 %0, 11
  %11 = mul i64 %0, 12
  br label %loop.body

loop.body:
  %indvars.iv774 = phi i64 [ 0, %entry ], [ %indvars.iv.next775, %loop.body ]
  %12 = add nsw i64 %indvars.iv774, -5
  %13 = add i64 %12, %0
  %14 = getelementptr i64, i64* %wet_cl, i64 %13
  %15 = bitcast i64* %14 to double*
  store double 0.000000e+00, double* %15, align 8
  %16 = add i64 %12, %1
  %17 = getelementptr i64, i64* %wet_cl, i64 %16
  %18 = bitcast i64* %17 to double*
  store double 0.000000e+00, double* %18, align 8
  %19 = add i64 %12, %2
  %20 = getelementptr i64, i64* %wet_cl, i64 %19
  %21 = bitcast i64* %20 to double*
  store double 0.000000e+00, double* %21, align 8
  %22 = add i64 %12, %3
  %23 = getelementptr i64, i64* %wet_cl, i64 %22
  %24 = bitcast i64* %23 to double*
  store double 0.000000e+00, double* %24, align 8
  %25 = add i64 %12, %4
  %26 = getelementptr i64, i64* %wet_cl, i64 %25
  %27 = bitcast i64* %26 to double*
  store double 0.000000e+00, double* %27, align 8
  %28 = add i64 %12, %5
  %29 = getelementptr i64, i64* %wet_cl, i64 %28
  %30 = bitcast i64* %29 to double*
  store double 0.000000e+00, double* %30, align 8
  %31 = add i64 %12, %6
  %32 = getelementptr i64, i64* %wet_cl, i64 %31
  %33 = bitcast i64* %32 to double*
  store double 0.000000e+00, double* %33, align 8
  %34 = add i64 %12, %7
  %35 = getelementptr i64, i64* %wet_cl, i64 %34
  %36 = bitcast i64* %35 to double*
  store double 0.000000e+00, double* %36, align 8
  %37 = add i64 %12, %8
  %38 = getelementptr i64, i64* %wet_cl, i64 %37
  %39 = bitcast i64* %38 to double*
  store double 0.000000e+00, double* %39, align 8
  %40 = add i64 %12, %9
  %41 = getelementptr i64, i64* %wet_cl, i64 %40
  %42 = bitcast i64* %41 to double*
  store double 0.000000e+00, double* %42, align 8
  %43 = add i64 %12, %10
  %44 = getelementptr i64, i64* %wet_cl, i64 %43
  %45 = bitcast i64* %44 to double*
  store double 0.000000e+00, double* %45, align 8
  %46 = add i64 %12, %11
  %47 = getelementptr i64, i64* %wet_cl, i64 %46
  %48 = bitcast i64* %47 to double*
  store double 0.000000e+00, double* %48, align 8
  %indvars.iv.next775 = add nuw nsw i64 %indvars.iv774, 1
  %exitcond778.not = icmp eq i64 %indvars.iv.next775, %wide.trip.count
  br i1 %exitcond778.not, label %loop.end, label %loop.body

loop.end:
  ret void
}