; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z15 -passes=slp-vectorizer %s -S -o - \
; RUN:   | FileCheck %s

; Test vectorization and reassociation of fmul operations. If the loads can
; be vectorized, cases of fewer operands are also profitable to vectorize.

define double @fmul_double_4_factors_seq(ptr nocapture noundef readonly %x) {
; CHECK-LABEL: define double @fmul_double_4_factors_seq(
; CHECK-SAME: ptr noundef readonly captures(none) [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr [[X]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = call reassoc nsz arcp contract afn double @llvm.vector.reduce.fmul.v4f64(double 1.000000e+00, <4 x double> [[TMP0]])
; CHECK-NEXT: ret double [[TMP1]]
;
entry:
  %0 = load double, ptr %x, align 8
  %arrayidx1 = getelementptr inbounds double, ptr %x, i64 1
  %1 = load double, ptr %arrayidx1, align 8
  %mul = fmul reassoc nsz arcp contract afn double %1, %0
  %arrayidx2 = getelementptr inbounds double, ptr %x, i64 2
  %2 = load double, ptr %arrayidx2, align 8
  %mul3 = fmul reassoc nsz arcp contract afn double %mul, %2
  %arrayidx4 = getelementptr inbounds double, ptr %x, i64 3
  %3 = load double, ptr %arrayidx4, align 8
  %mul5 = fmul reassoc nsz arcp contract afn double %mul3, %3
  ret double %mul5
}

define double @fmul_double_8_factors_nonseq(ptr nocapture noundef readonly %x) {
; CHECK-LABEL: define double @fmul_double_8_factors_nonseq(
; CHECK-SAME: ptr noundef readonly captures(none) [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[X]], align 8
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
; CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 4
; CHECK-NEXT: [[TMP2:%.*]] = load double, ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds double, ptr [[X]], i64 6
; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[ARRAYIDX4]], align 8
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, ptr [[X]], i64 8
; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[ARRAYIDX6]], align 8
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, ptr [[X]], i64 10
; CHECK-NEXT: [[TMP5:%.*]] = load double, ptr [[ARRAYIDX8]], align 8
; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds double, ptr [[X]], i64 12
; CHECK-NEXT: [[TMP6:%.*]] = load double, ptr [[ARRAYIDX10]], align 8
; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds double, ptr [[X]], i64 14
; CHECK-NEXT: [[TMP7:%.*]] = load double, ptr [[ARRAYIDX12]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <8 x double> poison, double [[TMP1]], i32 0
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <8 x double> [[TMP8]], double [[TMP0]], i32 1
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <8 x double> [[TMP9]], double [[TMP2]], i32 2
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <8 x double> [[TMP10]], double [[TMP3]], i32 3
; CHECK-NEXT: [[TMP12:%.*]] = insertelement <8 x double> [[TMP11]], double [[TMP4]], i32 4
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <8 x double> [[TMP12]], double [[TMP5]], i32 5
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x double> [[TMP13]], double [[TMP6]], i32 6
; CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x double> [[TMP14]], double [[TMP7]], i32 7
; CHECK-NEXT: [[TMP16:%.*]] = call reassoc nsz arcp contract afn double @llvm.vector.reduce.fmul.v8f64(double 1.000000e+00, <8 x double> [[TMP15]])
; CHECK-NEXT: ret double [[TMP16]]
;
entry:
  %0 = load double, ptr %x, align 8
  %arrayidx1 = getelementptr inbounds double, ptr %x, i64 2
  %1 = load double, ptr %arrayidx1, align 8
  %mul = fmul reassoc nsz arcp contract afn double %1, %0
  %arrayidx2 = getelementptr inbounds double, ptr %x, i64 4
  %2 = load double, ptr %arrayidx2, align 8
  %mul3 = fmul reassoc nsz arcp contract afn double %mul, %2
  %arrayidx4 = getelementptr inbounds double, ptr %x, i64 6
  %3 = load double, ptr %arrayidx4, align 8
  %mul5 = fmul reassoc nsz arcp contract afn double %mul3, %3
  %arrayidx6 = getelementptr inbounds double, ptr %x, i64 8
  %4 = load double, ptr %arrayidx6, align 8
  %mul7 = fmul reassoc nsz arcp contract afn double %mul5, %4
  %arrayidx8 = getelementptr inbounds double, ptr %x, i64 10
  %5 = load double, ptr %arrayidx8, align 8
  %mul9 = fmul reassoc nsz arcp contract afn double %mul7, %5
  %arrayidx10 = getelementptr inbounds double, ptr %x, i64 12
  %6 = load double, ptr %arrayidx10, align 8
  %mul11 = fmul reassoc nsz arcp contract afn double %mul9, %6
  %arrayidx12 = getelementptr inbounds double, ptr %x, i64 14
  %7 = load double, ptr %arrayidx12, align 8
  %mul13 = fmul reassoc nsz arcp contract afn double %mul11, %7
  ret double %mul13
}

define float @fmul_float_16_factors_nonseq(float noundef %m, ptr nocapture noundef readonly %x) {
; CHECK-LABEL: define float @fmul_float_16_factors_nonseq(
; CHECK-SAME: float noundef [[M:%.*]], ptr noundef readonly captures(none) [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[X]], align 4
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 4
; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[X]], i64 6
; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[X]], i64 8
; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[X]], i64 10
; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX8]], align 4
; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[X]], i64 12
; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX10]], align 4
; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[X]], i64 14
; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX12]], align 4
; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[X]], i64 16
; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX14]], align 4
; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds float, ptr [[X]], i64 18
; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX16]], align 4
; CHECK-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds float, ptr [[X]], i64 20
; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX18]], align 4
; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds float, ptr [[X]], i64 22
; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX20]], align 4
; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr inbounds float, ptr [[X]], i64 24
; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX22]], align 4
; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds float, ptr [[X]], i64 26
; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX24]], align 4
; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[X]], i64 28
; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX26]], align 4
; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, ptr [[X]], i64 30
; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX28]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = insertelement <16 x float> poison, float [[TMP1]], i32 0
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <16 x float> [[TMP16]], float [[TMP0]], i32 1
; CHECK-NEXT: [[TMP18:%.*]] = insertelement <16 x float> [[TMP17]], float [[TMP2]], i32 2
; CHECK-NEXT: [[TMP19:%.*]] = insertelement <16 x float> [[TMP18]], float [[TMP3]], i32 3
; CHECK-NEXT: [[TMP20:%.*]] = insertelement <16 x float> [[TMP19]], float [[TMP4]], i32 4
; CHECK-NEXT: [[TMP21:%.*]] = insertelement <16 x float> [[TMP20]], float [[TMP5]], i32 5
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <16 x float> [[TMP21]], float [[TMP6]], i32 6
; CHECK-NEXT: [[TMP23:%.*]] = insertelement <16 x float> [[TMP22]], float [[TMP7]], i32 7
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <16 x float> [[TMP23]], float [[TMP8]], i32 8
; CHECK-NEXT: [[TMP25:%.*]] = insertelement <16 x float> [[TMP24]], float [[TMP9]], i32 9
; CHECK-NEXT: [[TMP26:%.*]] = insertelement <16 x float> [[TMP25]], float [[TMP10]], i32 10
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <16 x float> [[TMP26]], float [[TMP11]], i32 11
; CHECK-NEXT: [[TMP28:%.*]] = insertelement <16 x float> [[TMP27]], float [[TMP12]], i32 12
; CHECK-NEXT: [[TMP29:%.*]] = insertelement <16 x float> [[TMP28]], float [[TMP13]], i32 13
; CHECK-NEXT: [[TMP30:%.*]] = insertelement <16 x float> [[TMP29]], float [[TMP14]], i32 14
; CHECK-NEXT: [[TMP31:%.*]] = insertelement <16 x float> [[TMP30]], float [[TMP15]], i32 15
; CHECK-NEXT: [[TMP32:%.*]] = call reassoc nsz arcp contract afn float @llvm.vector.reduce.fmul.v16f32(float 1.000000e+00, <16 x float> [[TMP31]])
; CHECK-NEXT: ret float [[TMP32]]
;
entry:
  %0 = load float, ptr %x, align 4
  %arrayidx1 = getelementptr inbounds float, ptr %x, i64 2
  %1 = load float, ptr %arrayidx1, align 4
  %mul = fmul reassoc nsz arcp contract afn float %1, %0
  %arrayidx2 = getelementptr inbounds float, ptr %x, i64 4
  %2 = load float, ptr %arrayidx2, align 4
  %mul3 = fmul reassoc nsz arcp contract afn float %mul, %2
  %arrayidx4 = getelementptr inbounds float, ptr %x, i64 6
  %3 = load float, ptr %arrayidx4, align 4
  %mul5 = fmul reassoc nsz arcp contract afn float %mul3, %3
  %arrayidx6 = getelementptr inbounds float, ptr %x, i64 8
  %4 = load float, ptr %arrayidx6, align 4
  %mul7 = fmul reassoc nsz arcp contract afn float %mul5, %4
  %arrayidx8 = getelementptr inbounds float, ptr %x, i64 10
  %5 = load float, ptr %arrayidx8, align 4
  %mul9 = fmul reassoc nsz arcp contract afn float %mul7, %5
  %arrayidx10 = getelementptr inbounds float, ptr %x, i64 12
  %6 = load float, ptr %arrayidx10, align 4
  %mul11 = fmul reassoc nsz arcp contract afn float %mul9, %6
  %arrayidx12 = getelementptr inbounds float, ptr %x, i64 14
  %7 = load float, ptr %arrayidx12, align 4
  %mul13 = fmul reassoc nsz arcp contract afn float %mul11, %7
  %arrayidx14 = getelementptr inbounds float, ptr %x, i64 16
  %8 = load float, ptr %arrayidx14, align 4
  %mul15 = fmul reassoc nsz arcp contract afn float %mul13, %8
  %arrayidx16 = getelementptr inbounds float, ptr %x, i64 18
  %9 = load float, ptr %arrayidx16, align 4
  %mul17 = fmul reassoc nsz arcp contract afn float %mul15, %9
  %arrayidx18 = getelementptr inbounds float, ptr %x, i64 20
  %10 = load float, ptr %arrayidx18, align 4
  %mul19 = fmul reassoc nsz arcp contract afn float %mul17, %10
  %arrayidx20 = getelementptr inbounds float, ptr %x, i64 22
  %11 = load float, ptr %arrayidx20, align 4
  %mul21 = fmul reassoc nsz arcp contract afn float %mul19, %11
  %arrayidx22 = getelementptr inbounds float, ptr %x, i64 24
  %12 = load float, ptr %arrayidx22, align 4
  %mul23 = fmul reassoc nsz arcp contract afn float %mul21, %12
  %arrayidx24 = getelementptr inbounds float, ptr %x, i64 26
  %13 = load float, ptr %arrayidx24, align 4
  %mul25 = fmul reassoc nsz arcp contract afn float %mul23, %13
  %arrayidx26 = getelementptr inbounds float, ptr %x, i64 28
  %14 = load float, ptr %arrayidx26, align 4
  %mul27 = fmul reassoc nsz arcp contract afn float %mul25, %14
  %arrayidx28 = getelementptr inbounds float, ptr %x, i64 30
  %15 = load float, ptr %arrayidx28, align 4
  %mul29 = fmul reassoc nsz arcp contract afn float %mul27, %15
  ret float %mul29
}