; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes="default<O2>" -mattr=avx < %s | FileCheck --check-prefix=AVX %s
; RUN: opt -S -passes="default<O2>" -mattr=avx2 < %s | FileCheck --check-prefix=AVX2 %s

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; Make sure we vectorize the loop even after its branches are converted to a
; switch: SimplifyCFG folds the two equality compares (13 / -12) into a
; `switch` with two cases targeting the same store block. With -mattr=avx the
; loop stays scalar (the switch survives in the output); with -mattr=avx2 the
; loop vectorizer emits an interleaved <8 x i32> main loop using masked stores,
; plus a single-wide vector epilogue loop and a scalar remainder.
define dso_local void @test(ptr %start, ptr %end) #0 {
;
; AVX-LABEL: @test(
; AVX-NEXT:  entry:
; AVX-NEXT:    [[I11_NOT1:%.*]] = icmp eq ptr [[START:%.*]], [[END:%.*]]
; AVX-NEXT:    br i1 [[I11_NOT1]], label [[EXIT:%.*]], label [[BB12:%.*]]
; AVX:       bb12:
; AVX-NEXT:    [[PTR2:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LATCH:%.*]] ], [ [[START]], [[ENTRY:%.*]] ]
; AVX-NEXT:    [[VAL:%.*]] = load i32, ptr [[PTR2]], align 4
; AVX-NEXT:    switch i32 [[VAL]], label [[LATCH]] [
; AVX-NEXT:      i32 -12, label [[STORE:%.*]]
; AVX-NEXT:      i32 13, label [[STORE]]
; AVX-NEXT:    ]
; AVX:       store:
; AVX-NEXT:    store i32 42, ptr [[PTR2]], align 4
; AVX-NEXT:    br label [[LATCH]]
; AVX:       latch:
; AVX-NEXT:    [[PTR_NEXT]] = getelementptr inbounds nuw i8, ptr [[PTR2]], i64 4
; AVX-NEXT:    [[I11_NOT:%.*]] = icmp eq ptr [[PTR_NEXT]], [[END]]
; AVX-NEXT:    br i1 [[I11_NOT]], label [[EXIT]], label [[BB12]]
; AVX:       exit:
; AVX-NEXT:    ret void
;
; AVX2-LABEL: @test(
; AVX2-NEXT:  entry:
; AVX2-NEXT:    [[I11_NOT1:%.*]] = icmp eq ptr [[START:%.*]], [[END:%.*]]
; AVX2-NEXT:    br i1 [[I11_NOT1]], label [[EXIT:%.*]], label [[BB12_PREHEADER:%.*]]
; AVX2:       iter.check:
; AVX2-NEXT:    [[END3:%.*]] = ptrtoint ptr [[END]] to i64
; AVX2-NEXT:    [[START4:%.*]] = ptrtoint ptr [[START]] to i64
; AVX2-NEXT:    [[TMP0:%.*]] = add i64 [[END3]], -4
; AVX2-NEXT:    [[TMP1:%.*]] = sub i64 [[TMP0]], [[START4]]
; AVX2-NEXT:    [[TMP2:%.*]] = lshr i64 [[TMP1]], 2
; AVX2-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
; AVX2-NEXT:    [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP1]], 28
; AVX2-NEXT:    br i1 [[MIN_ITERS_CHECK1]], label [[BB12_PREHEADER1:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
; AVX2:       vector.main.loop.iter.check:
; AVX2-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 124
; AVX2-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[BB12_PREHEADER11:%.*]], label [[VECTOR_PH:%.*]]
; AVX2:       vector.ph:
; AVX2-NEXT:    [[N_VEC:%.*]] = and i64 [[TMP3]], 9223372036854775776
; AVX2-NEXT:    br label [[VECTOR_BODY:%.*]]
; AVX2:       vector.body:
; AVX2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; AVX2-NEXT:    [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
; AVX2-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]]
; AVX2-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 32
; AVX2-NEXT:    [[TMP6:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 64
; AVX2-NEXT:    [[TMP7:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 96
; AVX2-NEXT:    [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[NEXT_GEP]], align 4
; AVX2-NEXT:    [[WIDE_LOAD8:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4
; AVX2-NEXT:    [[WIDE_LOAD9:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4
; AVX2-NEXT:    [[WIDE_LOAD10:%.*]] = load <8 x i32>, ptr [[TMP7]], align 4
; AVX2-NEXT:    [[TMP8:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD]], splat (i32 -12)
; AVX2-NEXT:    [[TMP9:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD8]], splat (i32 -12)
; AVX2-NEXT:    [[TMP10:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD9]], splat (i32 -12)
; AVX2-NEXT:    [[TMP11:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD10]], splat (i32 -12)
; AVX2-NEXT:    [[TMP12:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD]], splat (i32 13)
; AVX2-NEXT:    [[TMP13:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD8]], splat (i32 13)
; AVX2-NEXT:    [[TMP14:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD9]], splat (i32 13)
; AVX2-NEXT:    [[TMP15:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD10]], splat (i32 13)
; AVX2-NEXT:    [[TMP16:%.*]] = or <8 x i1> [[TMP8]], [[TMP12]]
; AVX2-NEXT:    [[TMP17:%.*]] = or <8 x i1> [[TMP9]], [[TMP13]]
; AVX2-NEXT:    [[TMP18:%.*]] = or <8 x i1> [[TMP10]], [[TMP14]]
; AVX2-NEXT:    [[TMP19:%.*]] = or <8 x i1> [[TMP11]], [[TMP15]]
; AVX2-NEXT:    tail call void @llvm.masked.store.v8i32.p0(<8 x i32> splat (i32 42), ptr [[NEXT_GEP]], i32 4, <8 x i1> [[TMP16]])
; AVX2-NEXT:    tail call void @llvm.masked.store.v8i32.p0(<8 x i32> splat (i32 42), ptr [[TMP5]], i32 4, <8 x i1> [[TMP17]])
; AVX2-NEXT:    tail call void @llvm.masked.store.v8i32.p0(<8 x i32> splat (i32 42), ptr [[TMP6]], i32 4, <8 x i1> [[TMP18]])
; AVX2-NEXT:    tail call void @llvm.masked.store.v8i32.p0(<8 x i32> splat (i32 42), ptr [[TMP7]], i32 4, <8 x i1> [[TMP19]])
; AVX2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; AVX2-NEXT:    [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; AVX2-NEXT:    br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; AVX2:       middle.block:
; AVX2-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; AVX2-NEXT:    br i1 [[CMP_N]], label [[EXIT]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; AVX2:       vec.epilog.iter.check:
; AVX2-NEXT:    [[TMP26:%.*]] = shl i64 [[N_VEC]], 2
; AVX2-NEXT:    [[IND_END11:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP26]]
; AVX2-NEXT:    [[N_VEC_REMAINING:%.*]] = and i64 [[TMP3]], 24
; AVX2-NEXT:    [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp eq i64 [[N_VEC_REMAINING]], 0
; AVX2-NEXT:    br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[BB12_PREHEADER1]], label [[BB12_PREHEADER11]]
; AVX2:       vec.epilog.ph:
; AVX2-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; AVX2-NEXT:    [[N_VEC10:%.*]] = and i64 [[TMP3]], 9223372036854775800
; AVX2-NEXT:    [[TMP21:%.*]] = shl i64 [[N_VEC10]], 2
; AVX2-NEXT:    [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP21]]
; AVX2-NEXT:    br label [[BB12:%.*]]
; AVX2:       vec.epilog.vector.body:
; AVX2-NEXT:    [[INDEX12:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[BB12_PREHEADER11]] ], [ [[INDEX_NEXT16:%.*]], [[BB12]] ]
; AVX2-NEXT:    [[OFFSET_IDX13:%.*]] = shl i64 [[INDEX12]], 2
; AVX2-NEXT:    [[NEXT_GEP14:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX13]]
; AVX2-NEXT:    [[WIDE_LOAD15:%.*]] = load <8 x i32>, ptr [[NEXT_GEP14]], align 4
; AVX2-NEXT:    [[TMP22:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD15]], splat (i32 -12)
; AVX2-NEXT:    [[TMP23:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD15]], splat (i32 13)
; AVX2-NEXT:    [[TMP24:%.*]] = or <8 x i1> [[TMP22]], [[TMP23]]
; AVX2-NEXT:    tail call void @llvm.masked.store.v8i32.p0(<8 x i32> splat (i32 42), ptr [[NEXT_GEP14]], i32 4, <8 x i1> [[TMP24]])
; AVX2-NEXT:    [[INDEX_NEXT16]] = add nuw i64 [[INDEX12]], 8
; AVX2-NEXT:    [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT16]], [[N_VEC10]]
; AVX2-NEXT:    br i1 [[TMP25]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[BB12]], !llvm.loop [[LOOP3:![0-9]+]]
; AVX2:       vec.epilog.middle.block:
; AVX2-NEXT:    [[CMP_N17:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC10]]
; AVX2-NEXT:    br i1 [[CMP_N17]], label [[EXIT]], label [[BB12_PREHEADER1]]
; AVX2:       bb12.preheader:
; AVX2-NEXT:    [[PTR2_PH:%.*]] = phi ptr [ [[IND_END11]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[BB12_PREHEADER]] ], [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ]
; AVX2-NEXT:    br label [[BB13:%.*]]
; AVX2:       bb12:
; AVX2-NEXT:    [[PTR2:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LATCH:%.*]] ], [ [[PTR2_PH]], [[BB12_PREHEADER1]] ]
; AVX2-NEXT:    [[VAL:%.*]] = load i32, ptr [[PTR2]], align 4
; AVX2-NEXT:    switch i32 [[VAL]], label [[LATCH]] [
; AVX2-NEXT:      i32 -12, label [[STORE:%.*]]
; AVX2-NEXT:      i32 13, label [[STORE]]
; AVX2-NEXT:    ]
; AVX2:       store:
; AVX2-NEXT:    store i32 42, ptr [[PTR2]], align 4
; AVX2-NEXT:    br label [[LATCH]]
; AVX2:       latch:
; AVX2-NEXT:    [[PTR_NEXT]] = getelementptr inbounds nuw i8, ptr [[PTR2]], i64 4
; AVX2-NEXT:    [[I11_NOT:%.*]] = icmp eq ptr [[PTR_NEXT]], [[END]]
; AVX2-NEXT:    br i1 [[I11_NOT]], label [[EXIT]], label [[BB13]], !llvm.loop [[LOOP4:![0-9]+]]
; AVX2:       exit:
; AVX2-NEXT:    ret void
;
; Input loop: walk i32 elements of [%start, %end); any element equal to 13 or
; -12 is overwritten with 42. The two icmp/or form is what SimplifyCFG turns
; into the switch checked above.
entry:
  br label %header

header:
  %ptr = phi ptr [ %start, %entry ], [ %ptr.next, %latch ]
  %i11 = icmp ne ptr %ptr, %end
  br i1 %i11, label %bb12, label %exit

bb12:
  %val = load i32, ptr %ptr, align 4
  %c1 = icmp eq i32 %val, 13
  %c2 = icmp eq i32 %val, -12
  %c3 = or i1 %c1, %c2
  br i1 %c3, label %store, label %latch

store:
  store i32 42, ptr %ptr, align 4
  br label %latch

latch:
  %ptr.next = getelementptr inbounds i32, ptr %ptr, i32 1
  br label %header

exit:
  ret void
}