; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -instsimplify -scalable-vectorization=on -force-target-supports-scalable-vectors -S | FileCheck %s

; Check that the uniform value %val is splatted as <vscale x 4 x i32> and
; truncated to <vscale x 4 x i16> inside the vector loop.
define void @trunc_minimal_bitwidth(i8* %bptr, i16* noalias %hptr, i32 %val, i64 %N) {
; CHECK-LABEL: @trunc_minimal_bitwidth(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[BROADCAST_SPLAT]] to <vscale x 4 x i16>
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i16, i16* [[HPTR:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[TMP5]] to <vscale x 4 x i16>*
; CHECK-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[TMP6]], align 2
; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP7]], 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[CONV21:%.*]] = trunc i32 [[VAL]] to i16
; CHECK-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[HPTR]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store i16 [[CONV21]], i16* [[ARRAYIDX23]], align 2
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       for.exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %0 = load i8, i8* %bptr, align 1
  %conv = zext i8 %0 to i32
  %conv21 = trunc i32 %val to i16
  %arrayidx23 = getelementptr inbounds i16, i16* %hptr, i64 %indvars.iv
  store i16 %conv21, i16* %arrayidx23, align 2
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %N
  br i1 %exitcond.not, label %for.exit, label %for.body, !llvm.loop !0

for.exit:
  ret void
}

; Check that the xor/mul chain on the zero-extended i8 loads is narrowed to
; its minimal bit width, so the vector loop operates on <vscale x 4 x i8>.
define void @trunc_minimal_bitwidths_shufflevector(i8* %p, i32 %arg1, i64 %len) {
; CHECK-LABEL: @trunc_minimal_bitwidths_shufflevector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[LEN:%.*]], [[TMP1]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[LEN]], [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[LEN]], [[N_MOD_VF]]
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[ARG1:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to <vscale x 4 x i8>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, <vscale x 4 x i8>* [[TMP5]], align 1
; CHECK-NEXT:    [[TMP6:%.*]] = trunc <vscale x 4 x i32> [[BROADCAST_SPLAT]] to <vscale x 4 x i8>
; CHECK-NEXT:    [[TMP7:%.*]] = xor <vscale x 4 x i8> [[WIDE_LOAD]], [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = mul <vscale x 4 x i8> [[TMP7]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP4]] to <vscale x 4 x i8>*
; CHECK-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[TMP9]], align 1
; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP10]], 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[LEN]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[P]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP13:%.*]] = load i8, i8* [[ARRAYIDX]], align 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[TMP13]] to i32
; CHECK-NEXT:    [[CONV17:%.*]] = xor i32 [[CONV]], [[ARG1]]
; CHECK-NEXT:    [[MUL18:%.*]] = mul nuw nsw i32 [[CONV17]], [[CONV]]
; CHECK-NEXT:    [[CONV19:%.*]] = trunc i32 [[MUL18]] to i8
; CHECK-NEXT:    store i8 [[CONV19]], i8* [[ARRAYIDX]], align 1
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[LEN]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       for.exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i8, i8* %p, i64 %indvars.iv
  %0 = load i8, i8* %arrayidx
  %conv = zext i8 %0 to i32
  %conv17 = xor i32 %conv, %arg1
  %mul18 = mul nuw nsw i32 %conv17, %conv
  %conv19 = trunc i32 %mul18 to i8
  store i8 %conv19, i8* %arrayidx
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %len
  br i1 %exitcond, label %for.exit, label %for.body, !llvm.loop !0

for.exit:                                         ; preds = %for.body
  ret void
}

; Loop metadata: request vectorization width 4 with scalable vectors enabled.
!0 = !{!0, !1, !2}
!1 = !{!"llvm.loop.vectorize.width", i32 4}
!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}