; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer,dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

define i32 @rollable(ptr noalias nocapture %in, ptr noalias nocapture %out, i64 %n) {
; CHECK-LABEL: @rollable(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[TMP1]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP8:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[I_019]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
; CHECK-NEXT:    [[TMP6:%.*]] = mul <4 x i32> [[TMP5]], splat (i32 7)
; CHECK-NEXT:    [[TMP7:%.*]] = add <4 x i32> [[TMP6]], <i32 7, i32 14, i32 21, i32 28>
; CHECK-NEXT:    store <4 x i32> [[TMP7]], ptr [[TMP4]], align 4
; CHECK-NEXT:    [[TMP8]] = add i64 [[I_019]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP8]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    ret i32 undef
;
  %1 = icmp eq i64 %n, 0
  br i1 %1, label %._crit_edge, label %.lr.ph

.lr.ph:                                           ; preds = %0, %.lr.ph
  %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
  %2 = shl i64 %i.019, 2
  %3 = getelementptr inbounds i32, ptr %in, i64 %2
  %4 = load i32, ptr %3, align 4
  %5 = or disjoint i64 %2, 1
  %6 = getelementptr inbounds i32, ptr %in, i64 %5
  %7 = load i32, ptr %6, align 4
  %8 = or disjoint i64 %2, 2
  %9 = getelementptr inbounds i32, ptr %in, i64 %8
  %10 = load i32, ptr %9, align 4
  %11 = or disjoint i64 %2, 3
  %12 = getelementptr inbounds i32, ptr %in, i64 %11
  %13 = load i32, ptr %12, align 4
  %14 = mul i32 %4, 7
  %15 = add i32 %14, 7
  %16 = mul i32 %7, 7
  %17 = add i32 %16, 14
  %18 = mul i32 %10, 7
  %19 = add i32 %18, 21
  %20 = mul i32 %13, 7
  %21 = add i32 %20, 28
  %22 = getelementptr inbounds i32, ptr %out, i64 %2
  store i32 %15, ptr %22, align 4
  %23 = getelementptr inbounds i32, ptr %out, i64 %5
  store i32 %17, ptr %23, align 4
  %24 = getelementptr inbounds i32, ptr %out, i64 %8
  store i32 %19, ptr %24, align 4
  %25 = getelementptr inbounds i32, ptr %out, i64 %11
  store i32 %21, ptr %25, align 4
  %26 = add i64 %i.019, 1
  %exitcond = icmp eq i64 %26, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}

define i32 @unrollable(ptr %in, ptr %out, i64 %n) nounwind ssp uwtable {
; CHECK-LABEL: @unrollable(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[TMP1]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP14:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[I_019]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = or disjoint i64 [[TMP2]], 2
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP7:%.*]] = load <2 x i32>, ptr [[TMP3]], align 4
; CHECK-NEXT:    [[TMP8:%.*]] = mul <2 x i32> [[TMP7]], splat (i32 7)
; CHECK-NEXT:    [[TMP9:%.*]] = add <2 x i32> [[TMP8]], <i32 7, i32 14>
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[OUT]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP11:%.*]] = load <2 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT:    [[TMP12:%.*]] = mul <2 x i32> [[TMP11]], splat (i32 7)
; CHECK-NEXT:    [[TMP13:%.*]] = add <2 x i32> [[TMP12]], <i32 21, i32 28>
; CHECK-NEXT:    store <2 x i32> [[TMP9]], ptr [[TMP6]], align 4
; CHECK-NEXT:    [[BARRIER:%.*]] = call i32 @goo(i32 0)
; CHECK-NEXT:    store <2 x i32> [[TMP13]], ptr [[TMP10]], align 4
; CHECK-NEXT:    [[TMP14]] = add i64 [[I_019]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP14]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    ret i32 undef
;
  %1 = icmp eq i64 %n, 0
  br i1 %1, label %._crit_edge, label %.lr.ph

.lr.ph:                                           ; preds = %0, %.lr.ph
  %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
  %2 = shl i64 %i.019, 2
  %3 = getelementptr inbounds i32, ptr %in, i64 %2
  %4 = load i32, ptr %3, align 4
  %5 = or disjoint i64 %2, 1
  %6 = getelementptr inbounds i32, ptr %in, i64 %5
  %7 = load i32, ptr %6, align 4
  %8 = or disjoint i64 %2, 2
  %9 = getelementptr inbounds i32, ptr %in, i64 %8
  %10 = load i32, ptr %9, align 4
  %11 = or disjoint i64 %2, 3
  %12 = getelementptr inbounds i32, ptr %in, i64 %11
  %13 = load i32, ptr %12, align 4
  %14 = mul i32 %4, 7
  %15 = add i32 %14, 7
  %16 = mul i32 %7, 7
  %17 = add i32 %16, 14
  %18 = mul i32 %10, 7
  %19 = add i32 %18, 21
  %20 = mul i32 %13, 7
  %21 = add i32 %20, 28
  %22 = getelementptr inbounds i32, ptr %out, i64 %2
  store i32 %15, ptr %22, align 4
  %23 = getelementptr inbounds i32, ptr %out, i64 %5
  store i32 %17, ptr %23, align 4
  %barrier = call i32 @goo(i32 0)                 ; <---------------- memory barrier.
  %24 = getelementptr inbounds i32, ptr %out, i64 %8
  store i32 %19, ptr %24, align 4
  %25 = getelementptr inbounds i32, ptr %out, i64 %11
  store i32 %21, ptr %25, align 4
  %26 = add i64 %i.019, 1
  %exitcond = icmp eq i64 %26, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}

declare i32 @goo(i32)