; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -S %s | FileCheck %s

target triple = "arm64-apple-macosx11.0.0"

; %recur, an operand of the fshl intrinsic, is a first-order recurrence; the
; checks verify the loop is vectorized and the recurrence is materialized via
; shuffles of the wide loads.
define void @fshl_operand_first_order_recurrence(ptr %dst, ptr noalias %src) {
; CHECK-LABEL: define void @fshl_operand_first_order_recurrence(
; CHECK-SAME: ptr [[DST:%.*]], ptr noalias [[SRC:%.*]]) {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <2 x i64> [ <i64 poison, i64 0>, %[[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 2
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP4]], align 8
; CHECK-NEXT:    [[WIDE_LOAD1]] = load <2 x i64>, ptr [[TMP5]], align 8
; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <2 x i64> [[VECTOR_RECUR]], <2 x i64> [[WIDE_LOAD]], <2 x i32> <i32 1, i32 2>
; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[WIDE_LOAD]], <2 x i64> [[WIDE_LOAD1]], <2 x i32> <i32 1, i32 2>
; CHECK-NEXT:    [[TMP8:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> splat (i64 1), <2 x i64> [[TMP6]], <2 x i64> splat (i64 1))
; CHECK-NEXT:    [[TMP9:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> splat (i64 1), <2 x i64> [[TMP7]], <2 x i64> splat (i64 1))
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[TMP10]], i32 0
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP10]], i32 2
; CHECK-NEXT:    store <2 x i64> [[TMP8]], ptr [[TMP12]], align 8
; CHECK-NEXT:    store <2 x i64> [[TMP9]], ptr [[TMP13]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; CHECK-NEXT:    br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <2 x i64> [[WIDE_LOAD1]], i32 1
; CHECK-NEXT:    br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK:       [[SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT:    br label %[[LOOP:.*]]
; CHECK:       [[LOOP]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[GEP_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT:    [[L]] = load i64, ptr [[GEP_SRC]], align 8
; CHECK-NEXT:    [[OR:%.*]] = tail call i64 @llvm.fshl.i64(i64 1, i64 [[RECUR]], i64 1)
; CHECK-NEXT:    [[GEP_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]]
; CHECK-NEXT:    store i64 [[OR]], ptr [[GEP_DST]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 100
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %recur = phi i64 [ 0, %entry ], [ %l, %loop ]
  %gep.src = getelementptr inbounds i64, ptr %src, i64 %iv
  %l = load i64, ptr %gep.src, align 8
  %or = tail call i64 @llvm.fshl.i64(i64 1, i64 %recur, i64 1)
  %gep.dst = getelementptr inbounds i64, ptr %dst, i64 %iv
  store i64 %or, ptr %gep.dst, align 8
  %iv.next = add i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv, 100
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}

; Test case for https://github.com/llvm/llvm-project/issues/107016.
define void @powi_call(ptr %P) {
; CHECK-LABEL: define void @powi_call(
; CHECK-SAME: ptr [[P:%.*]]) {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds double, ptr [[P]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds double, ptr [[TMP1]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> [[WIDE_LOAD]], i32 3)
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds double, ptr [[TMP1]], i32 0
; CHECK-NEXT:    store <2 x double> [[TMP3]], ptr [[TMP4]], align 8
; CHECK-NEXT:    br label %[[MIDDLE_BLOCK:.*]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK:       [[SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT:    br label %[[LOOP:.*]]
; CHECK:       [[LOOP]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds double, ptr [[P]], i64 [[IV]]
; CHECK-NEXT:    [[L:%.*]] = load double, ptr [[GEP]], align 8
; CHECK-NEXT:    [[POWI:%.*]] = tail call double @llvm.powi.f64.i32(double [[L]], i32 3)
; CHECK-NEXT:    store double [[POWI]], ptr [[GEP]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV]], 1
; CHECK-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep = getelementptr inbounds double, ptr %P, i64 %iv
  %l = load double, ptr %gep
  %powi = tail call double @llvm.powi.f64.i32(double %l, i32 3)
  store double %powi, ptr %gep, align 8
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv, 1
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}

; The sqrt call only executes on a conditional path; the checks show the loop
; is currently not vectorized.
define void @call_scalarized(ptr noalias %src, ptr noalias %dst) {
; CHECK-LABEL: define void @call_scalarized(
; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    br label %[[LOOP_HEADER:.*]]
; CHECK:       [[LOOP_HEADER]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 100, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], -1
; CHECK-NEXT:    [[GEP_SRC:%.*]] = getelementptr double, ptr [[SRC]], i64 [[IV_NEXT]]
; CHECK-NEXT:    [[L:%.*]] = load double, ptr [[GEP_SRC]], align 8
; CHECK-NEXT:    [[CMP295:%.*]] = fcmp une double [[L]], 4.000000e+00
; CHECK-NEXT:    [[CMP299:%.*]] = fcmp ugt double [[L]], 0.000000e+00
; CHECK-NEXT:    [[OR_COND:%.*]] = or i1 [[CMP295]], [[CMP299]]
; CHECK-NEXT:    br i1 [[OR_COND]], label %[[LOOP_LATCH]], label %[[THEN:.*]]
; CHECK:       [[THEN]]:
; CHECK-NEXT:    [[SQRT:%.*]] = call double @llvm.sqrt.f64(double [[L]])
; CHECK-NEXT:    [[GEP_DST:%.*]] = getelementptr double, ptr [[DST]], i64 [[IV_NEXT]]
; CHECK-NEXT:    store double [[SQRT]], ptr [[GEP_DST]], align 8
; CHECK-NEXT:    br label %[[LOOP_LATCH]]
; CHECK:       [[LOOP_LATCH]]:
; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 0
; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label %[[EXIT:.*]], label %[[LOOP_HEADER]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop.header

loop.header:
  %iv = phi i64 [ 100, %entry ], [ %iv.next, %loop.latch ]
  %iv.next = add i64 %iv, -1
  %gep.src = getelementptr double, ptr %src, i64 %iv.next
  %l = load double, ptr %gep.src, align 8
  %cmp295 = fcmp une double %l, 4.000000e+00
  %cmp299 = fcmp ugt double %l, 0.000000e+00
  %or.cond = or i1 %cmp295, %cmp299
  br i1 %or.cond, label %loop.latch, label %then

then:
  %sqrt = call double @llvm.sqrt.f64(double %l)
  %gep.dst = getelementptr double, ptr %dst, i64 %iv.next
  store double %sqrt, ptr %gep.dst, align 8
  br label %loop.latch

loop.latch:
  %tobool.not = icmp eq i64 %iv.next, 0
  br i1 %tobool.not, label %exit, label %loop.header

exit:
  ret void
}

; The smax/umin calls and their feeding load do not depend on the induction
; variable; the checks show the loop is currently not vectorized.
define void @call_forced_scalar(ptr %src.1, ptr %src.2, ptr noalias %dst.1, ptr noalias %dst.2) {
; CHECK-LABEL: define void @call_forced_scalar(
; CHECK-SAME: ptr [[SRC_1:%.*]], ptr [[SRC_2:%.*]], ptr noalias [[DST_1:%.*]], ptr noalias [[DST_2:%.*]]) {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    br label %[[LOOP:.*]]
; CHECK:       [[LOOP]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[SRC_1]], align 4
; CHECK-NEXT:    [[SMAX:%.*]] = tail call i32 @llvm.smax.i32(i32 [[TMP0]], i32 0)
; CHECK-NEXT:    [[UMIN:%.*]] = tail call i32 @llvm.umin.i32(i32 [[SMAX]], i32 1)
; CHECK-NEXT:    [[UMIN_EXT:%.*]] = zext i32 [[UMIN]] to i64
; CHECK-NEXT:    [[GEP_SRC_2:%.*]] = getelementptr i8, ptr [[SRC_2]], i64 [[UMIN_EXT]]
; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[GEP_SRC_2]], align 1
; CHECK-NEXT:    [[L_EXT:%.*]] = zext i8 [[TMP1]] to i32
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 3, [[L_EXT]]
; CHECK-NEXT:    store i32 [[MUL]], ptr [[DST_1]], align 4
; CHECK-NEXT:    [[GEP_DST_2:%.*]] = getelementptr i32, ptr [[DST_2]], i64 [[IV]]
; CHECK-NEXT:    store i32 0, ptr [[GEP_DST_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 0
; CHECK-NEXT:    br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %0 = load i32, ptr %src.1, align 4
  %smax = tail call i32 @llvm.smax.i32(i32 %0, i32 0)
  %umin = tail call i32 @llvm.umin.i32(i32 %smax, i32 1)
  %umin.ext = zext i32 %umin to i64
  %gep.src.2 = getelementptr i8, ptr %src.2, i64 %umin.ext
  %1 = load i8, ptr %gep.src.2, align 1
  %l.ext = zext i8 %1 to i32
  %mul = mul i32 3, %l.ext
  store i32 %mul, ptr %dst.1, align 4
  %gep.dst.2 = getelementptr i32, ptr %dst.2, i64 %iv
  store i32 0, ptr %gep.dst.2, align 4
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv.next, 0
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}

declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
declare double @llvm.sqrt.f64(double)
declare double @llvm.powi.f64.i32(double, i32)
declare i64 @llvm.fshl.i64(i64, i64, i64)
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]]}
;.