; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -mattr=+v -S %s | FileCheck %s

target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
target triple = "riscv64-unknown-linux-gnu"

define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-LABEL: define void @test_pr98413_zext_removed(
; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i64 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 8
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 97, [[TMP1]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 8
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 97, [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 97, [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 8
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = trunc <vscale x 8 x i64> [[BROADCAST_SPLAT]] to <vscale x 8 x i8>
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP9]], align 8
; CHECK-NEXT:    [[TMP10:%.*]] = trunc <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i8>
; CHECK-NEXT:    [[TMP11:%.*]] = and <vscale x 8 x i8> [[TMP6]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 0
; CHECK-NEXT:    store <vscale x 8 x i8> [[TMP11]], ptr [[TMP13]], align 1
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 97, [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK:       [[SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT:    br label %[[LOOP:.*]]
; CHECK:       [[LOOP]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT:    [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8
; CHECK-NEXT:    [[EXT_L:%.*]] = zext i16 [[L]] to i64
; CHECK-NEXT:    [[AND:%.*]] = and i64 [[X]], [[EXT_L]]
; CHECK-NEXT:    [[TRUNC_AND:%.*]] = trunc i64 [[AND]] to i8
; CHECK-NEXT:    [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]]
; CHECK-NEXT:    store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1
; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.src = getelementptr inbounds i16, ptr %src, i64 %iv
  %l = load i16, ptr %gep.src, align 8
  %ext.l = zext i16 %l to i64
  %and = and i64 %x, %ext.l
  %trunc.and = trunc i64 %and to i8
  %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv
  store i8 %trunc.and, ptr %gep.dst, align 1
  %iv.next = add i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv, 96
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}

define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-LABEL: define void @test_pr98413_sext_removed(
; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 8
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 97, [[TMP1]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 8
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 97, [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 97, [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 8
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = trunc <vscale x 8 x i64> [[BROADCAST_SPLAT]] to <vscale x 8 x i8>
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP9]], align 8
; CHECK-NEXT:    [[TMP10:%.*]] = trunc <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i8>
; CHECK-NEXT:    [[TMP11:%.*]] = and <vscale x 8 x i8> [[TMP6]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 0
; CHECK-NEXT:    store <vscale x 8 x i8> [[TMP11]], ptr [[TMP13]], align 1
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 97, [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK:       [[SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT:    br label %[[LOOP:.*]]
; CHECK:       [[LOOP]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT:    [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8
; CHECK-NEXT:    [[EXT_L:%.*]] = sext i16 [[L]] to i64
; CHECK-NEXT:    [[AND:%.*]] = and i64 [[X]], [[EXT_L]]
; CHECK-NEXT:    [[TRUNC_AND:%.*]] = trunc i64 [[AND]] to i8
; CHECK-NEXT:    [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]]
; CHECK-NEXT:    store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1
; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.src = getelementptr inbounds i16, ptr %src, i64 %iv
  %l = load i16, ptr %gep.src, align 8
  %ext.l = sext i16 %l to i64
  %and = and i64 %x, %ext.l
  %trunc.and = trunc i64 %and to i8
  %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv
  store i8 %trunc.and, ptr %gep.dst, align 1
  %iv.next = add i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv, 96
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}

; Test case for https://github.com/llvm/llvm-project/issues/106641.
define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-LABEL: define void @truncate_to_i1_used_by_branch(
; CHECK-SAME: i8 [[X:%.*]], ptr [[DST:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i8> poison, i8 [[X]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i8> [[BROADCAST_SPLATINSERT]], <2 x i8> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP0:%.*]] = trunc <2 x i8> [[BROADCAST_SPLAT]] to <2 x i1>
; CHECK-NEXT:    [[TMP2:%.*]] = or <2 x i1> splat (i1 true), [[TMP0]]
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 0, i32 2)
; CHECK-NEXT:    [[TMP3:%.*]] = select <2 x i1> [[ACTIVE_LANE_MASK]], <2 x i1> [[TMP2]], <2 x i1> zeroinitializer
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x ptr> poison, ptr [[DST]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT4:%.*]] = shufflevector <2 x ptr> [[BROADCAST_SPLATINSERT1]], <2 x ptr> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> zeroinitializer, <2 x ptr> [[BROADCAST_SPLAT4]], i32 1, <2 x i1> [[TMP3]])
; CHECK-NEXT:    br label %[[MIDDLE_BLOCK:.*]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK:       [[SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i8 [ 2, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT:    br label %[[LOOP_HEADER:.*]]
; CHECK:       [[LOOP_HEADER]]:
; CHECK-NEXT:    [[F_039:%.*]] = phi i8 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = or i8 23, [[X]]
; CHECK-NEXT:    [[EXTRACT_T:%.*]] = trunc i8 [[TMP4]] to i1
; CHECK-NEXT:    br i1 [[EXTRACT_T]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
; CHECK:       [[THEN]]:
; CHECK-NEXT:    store i8 0, ptr [[DST]], align 1
; CHECK-NEXT:    br label %[[LOOP_LATCH]]
; CHECK:       [[LOOP_LATCH]]:
; CHECK-NEXT:    [[ADD]] = add i8 [[F_039]], 1
; CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[F_039]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[CONV]], 1
; CHECK-NEXT:    br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop.header

loop.header:
  %f.039 = phi i8 [ 0, %entry ], [ %add, %loop.latch ]
  %0 = or i8 23, %x
  %extract.t = trunc i8 %0 to i1
  br i1 %extract.t, label %then, label %loop.latch

then:
  store i8 0, ptr %dst, align 1
  br label %loop.latch

loop.latch:
  %add = add i8 %f.039, 1
  %conv = sext i8 %f.039 to i32
  %cmp = icmp slt i32 %conv, 1
  br i1 %cmp, label %loop.header, label %exit

exit:
  ret void
}

; Test case for https://github.com/llvm/llvm-project/issues/107171.
define i8 @icmp_ops_narrowed_to_i1() #1 {
; CHECK-LABEL: define i8 @icmp_ops_narrowed_to_i1(
; CHECK-SAME: ) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    br label %[[LOOP:.*]]
; CHECK:       [[LOOP]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i16 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 0, 0
; CHECK-NEXT:    [[EXT:%.*]] = zext i1 [[C]] to i64
; CHECK-NEXT:    [[SHR:%.*]] = lshr i64 [[EXT]], 1
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[SHR]] to i8
; CHECK-NEXT:    [[IV_NEXT]] = add i16 [[IV]], 1
; CHECK-NEXT:    [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 100
; CHECK-NEXT:    br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    [[TRUNC_LCSSA:%.*]] = phi i8 [ [[TRUNC]], %[[LOOP]] ]
; CHECK-NEXT:    ret i8 [[TRUNC_LCSSA]]
;
entry:
  br label %loop

loop:
  %iv = phi i16 [ 0, %entry ], [ %iv.next, %loop ]
  %c = icmp eq i8 0, 0
  %ext = zext i1 %c to i64
  %shr = lshr i64 %ext, 1
  %trunc = trunc i64 %shr to i8
  %iv.next = add i16 %iv, 1
  %ec = icmp eq i16 %iv.next, 100
  br i1 %ec, label %exit, label %loop

exit:
  ret i8 %trunc
}

define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64 %v, ptr noalias %src) #1 {
; CHECK-LABEL: define void @icmp_only_first_op_truncated(
; CHECK-SAME: ptr noalias [[DST:%.*]], i32 [[X:%.*]], i64 [[N:%.*]], i64 [[V:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR2]] {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    [[T:%.*]] = trunc i64 [[N]] to i32
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[V]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 2
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 2
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 2
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[N]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP7:%.*]] = trunc <vscale x 2 x i64> [[BROADCAST_SPLAT]] to <vscale x 2 x i32>
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[T]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq <vscale x 2 x i32> [[TMP7]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[X]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr double, ptr [[SRC]], i64 [[TMP9]]
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[TMP10]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[DST]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT4]], i32 8, <vscale x 2 x i1> [[TMP8]], <vscale x 2 x double> poison)
; CHECK-NEXT:    call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[WIDE_MASKED_GATHER]], <vscale x 2 x ptr> [[BROADCAST_SPLAT6]], i32 8, <vscale x 2 x i1> [[TMP8]])
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK:       [[SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT:    br label %[[LOOP_HEADER:.*]]
; CHECK:       [[LOOP_HEADER]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT:    [[T1:%.*]] = trunc i64 [[N]] to i32
; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[T1]], [[T]]
; CHECK-NEXT:    br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
; CHECK:       [[THEN]]:
; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[X]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr double, ptr [[SRC]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[RETVAL:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT:    store double [[RETVAL]], ptr [[DST]], align 8
; CHECK-NEXT:    br label %[[LOOP_LATCH]]
; CHECK:       [[LOOP_LATCH]]:
; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV]], [[V]]
; CHECK-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  %t = trunc i64 %N to i32
  br label %loop.header

loop.header:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %t1 = trunc i64 %N to i32
  %c = icmp eq i32 %t1, %t
  br i1 %c, label %then, label %loop.latch

then:
  %idxprom = zext i32 %x to i64
  %arrayidx = getelementptr double, ptr %src, i64 %idxprom
  %retval = load double, ptr %arrayidx, align 8
  store double %retval, ptr %dst, align 8
  br label %loop.latch

loop.latch:
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv, %v
  br i1 %ec, label %exit, label %loop.header

exit:
  ret void
}

attributes #0 = { "target-features"="+64bit,+v,+zvl256b" }
attributes #1 = { "target-features"="+64bit,+v" }

;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]}
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
;.