; RUN: opt < %s -passes=loop-vectorize -force-vector-width=4 -force-widen-divrem-via-safe-divisor=0 -S 2>&1 | FileCheck %s
; RUN: opt < %s -passes=debugify,loop-vectorize -force-vector-width=4 -force-widen-divrem-via-safe-divisor=0 -S | FileCheck %s -check-prefix DEBUGLOC
; RUN: opt < %s -passes=debugify,loop-vectorize -force-vector-width=4 -force-widen-divrem-via-safe-divisor=0 -S --try-experimental-debuginfo-iterators | FileCheck %s -check-prefix DEBUGLOC
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

; This test makes sure we don't duplicate the loop vectorizer's metadata
; while marking the loop as already vectorized (by adding
; llvm.loop.isvectorized), even at lower optimization levels, where no extra
; cleanup is done.

; Check that the phi to resume the scalar part of the loop
; has a debug location.
define void @_Z3fooPf(ptr %a) {
; DEBUGLOC-LABEL: define void @_Z3fooPf(
; DEBUGLOC: scalar.ph:
; DEBUGLOC-NEXT:   %bc.resume.val = phi {{.*}} !dbg ![[RESUMELOC:[0-9]+]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %indvars.iv
  %p = load float, ptr %arrayidx, align 4
  %mul = fmul float %p, 2.000000e+00
  store float %mul, ptr %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1024
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body
  ret void
}

define void @widen_ptr_induction_dbg(ptr %start, ptr %end) {
; DEBUGLOC-LABEL: define void @widen_ptr_induction_dbg(
; DEBUGLOC: vector.body:
; DEBUGLOC-NEXT:   = phi ptr {{.+}}, !dbg ![[PTRIVLOC:[0-9]+]]
; DEBUGLOC: = phi i64
;
; DEBUGLOC: loop:
; DEBUGLOC-NEXT:   = phi ptr {{.+}}, !dbg ![[PTRIVLOC]]
;
entry:
  br label %loop

loop:
  %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ]
  %iv.next = getelementptr inbounds ptr, ptr %iv, i64 1
  store ptr %iv, ptr %iv, align 1
  %cmp.not = icmp eq ptr %iv.next, %end
  br i1 %cmp.not, label %exit, label %loop

exit:
  ret void
}

define void @predicated_phi_dbg(i64 %n, ptr %x) {
; DEBUGLOC-LABEL: define void @predicated_phi_dbg(
; DEBUGLOC: pred.udiv.continue{{.+}}:
; DEBUGLOC-NEXT:   = phi <4 x i64> {{.+}}, !dbg [[PREDPHILOC:![0-9]+]]
;
; DEBUGLOC: for.body:
; DEBUGLOC:   %tmp4 = udiv i64 %n, %i, !dbg [[PREDPHILOC]]
;
entry:
  br label %for.body

for.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %for.inc ]
  %cmp = icmp ult i64 %i, 5
  br i1 %cmp, label %if.then, label %for.inc

if.then:
  %tmp4 = udiv i64 %n, %i
  br label %for.inc

for.inc:
  %d = phi i64 [ 0, %for.body ], [ %tmp4, %if.then ]
  %idx = getelementptr i64, ptr %x, i64 %i
  store i64 %d, ptr %idx
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  ret void
}

define void @scalar_cast_dbg(ptr nocapture %a, i32 %start, i64 %k) {
; DEBUGLOC-LABEL: define void @scalar_cast_dbg(
; DEBUGLOC:   = trunc i64 %index to i32, !dbg [[CASTLOC:![0-9]+]]
;
; DEBUGLOC: loop:
; DEBUGLOC:   %trunc.iv = trunc i64 %iv to i32, !dbg [[CASTLOC]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %trunc.iv = trunc i64 %iv to i32
  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %trunc.iv
  store i32 %trunc.iv, ptr %arrayidx, align 4
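  ; The scalar trunc of %iv above is the cast whose debug location the
  ; CASTLOC checks expect to be reused by the trunc of %index emitted for
  ; the vectorized loop.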
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %k
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}

define void @widen_intrinsic_dbg(i64 %n, ptr %y, ptr %x) {
; DEBUGLOC-LABEL: define void @widen_intrinsic_dbg(
; DEBUGLOC: vector.body:
; DEBUGLOC:   = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{.+}}), !dbg ![[INTRINSIC_LOC:[0-9]+]]
; DEBUGLOC: loop:
; DEBUGLOC:   = call float @llvm.sqrt.f32(float %{{.+}}), !dbg ![[INTRINSIC_LOC]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.y = getelementptr inbounds float, ptr %y, i64 %iv
  %load = load float, ptr %gep.y, align 4
  %call = call float @llvm.sqrt.f32(float %load)
  %gep.x = getelementptr inbounds float, ptr %x, i64 %iv
  store float %call, ptr %gep.x, align 4
  %iv.next = add i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}

declare float @llvm.sqrt.f32(float)

!0 = !{!0, !1}
!1 = !{!"llvm.loop.vectorize.width", i32 4}
; CHECK-NOT: !{!"llvm.loop.vectorize.width", i32 4}
; CHECK: !{!"llvm.loop.isvectorized", i32 1}

; DEBUGLOC: ![[RESUMELOC]] = !DILocation(line: 2
; DEBUGLOC: ![[PTRIVLOC]] = !DILocation(line: 12
; DEBUGLOC: ![[INTRINSIC_LOC]] = !DILocation(line: 44