; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -S -passes=loop-vectorize -vectorize-memory-check-threshold=60 < %s -o - | FileCheck %s

; This case triggers aggressive interleaving on PowerPC, resulting in a lot of memory checks.
; (The A2 always unrolls aggressively; if aggressive interleaving is enabled, similar issues
; may occur on other targets as well.)
; Interleaving should also be restricted by the memory-check threshold, just like the VF is
; (e.g., runtime-memory-check-threshold, default 8).

; CHECK-LABEL: @eddy_diff_caleddy_
; CHECK-NOT: vector.memcheck

define fastcc void @eddy_diff_caleddy_(ptr %wet_cl, i64 %0, i32 %ncol.cast.val) {
entry:
  %trip.count = add nuw i32 %ncol.cast.val, 1
  %wide.trip.count = zext i32 %ncol.cast.val to i64
  %1 = shl i64 %0, 1
  %2 = mul i64 %0, 3
  %3 = shl i64 %0, 2
  %4 = mul i64 %0, 5
  %5 = mul i64 %0, 6
  %6 = mul i64 %0, 7
  %7 = shl i64 %0, 3
  %8 = mul i64 %0, 9
  %9 = mul i64 %0, 10
  %10 = mul i64 %0, 11
  %11 = mul i64 %0, 12
  br label %loop.body

; Each iteration stores to %wet_cl through 12 pointers whose offsets differ by multiples of the
; unknown value %0; ruling out overlap between these stores is what generates the memory checks.
loop.body:
  %indvars.iv774 = phi i64 [ 0, %entry ], [ %indvars.iv.next775, %loop.body ]
  %12 = add nsw i64 %indvars.iv774, -5
  %13 = add i64 %12, %0
  %14 = getelementptr i64, ptr %wet_cl, i64 %13
  store double 0.000000e+00, ptr %14, align 8
  %15 = add i64 %12, %1
  %16 = getelementptr i64, ptr %wet_cl, i64 %15
  store double 0.000000e+00, ptr %16, align 8
  %17 = add i64 %12, %2
  %18 = getelementptr i64, ptr %wet_cl, i64 %17
  store double 0.000000e+00, ptr %18, align 8
  %19 = add i64 %12, %3
  %20 = getelementptr i64, ptr %wet_cl, i64 %19
  store double 0.000000e+00, ptr %20, align 8
  %21 = add i64 %12, %4
  %22 = getelementptr i64, ptr %wet_cl, i64 %21
  store double 0.000000e+00, ptr %22, align 8
  %23 = add i64 %12, %5
  %24 = getelementptr i64, ptr %wet_cl, i64 %23
  store double 0.000000e+00, ptr %24, align 8
  %25 = add i64 %12, %6
  %26 = getelementptr i64, ptr %wet_cl, i64 %25
  store double 0.000000e+00, ptr %26, align 8
  %27 = add i64 %12, %7
  %28 = getelementptr i64, ptr %wet_cl, i64 %27
  store double 0.000000e+00, ptr %28, align 8
  %29 = add i64 %12, %8
  %30 = getelementptr i64, ptr %wet_cl, i64 %29
  store double 0.000000e+00, ptr %30, align 8
  %31 = add i64 %12, %9
  %32 = getelementptr i64, ptr %wet_cl, i64 %31
  store double 0.000000e+00, ptr %32, align 8
  %33 = add i64 %12, %10
  %34 = getelementptr i64, ptr %wet_cl, i64 %33
  store double 0.000000e+00, ptr %34, align 8
  %35 = add i64 %12, %11
  %36 = getelementptr i64, ptr %wet_cl, i64 %35
  store double 0.000000e+00, ptr %36, align 8
  %indvars.iv.next775 = add nuw nsw i64 %indvars.iv774, 1
  %exitcond778.not = icmp eq i64 %indvars.iv.next775, %wide.trip.count
  br i1 %exitcond778.not, label %loop.end, label %loop.body

loop.end:
  ret void
}