; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: %if x86-registered-target %{ opt -S -p slp-vectorizer -mtriple=x86_64-- %s | FileCheck %s %}
; RUN: %if aarch64-registered-target %{ opt -S -p slp-vectorizer -mtriple=aarch64-unknown-linux-gnu %s | FileCheck %s %}

; Four scalar load/fadd/fmul/store chains over consecutive floats at %ptr are
; expected to be SLP-vectorized into a single <4 x float> load, a widened
; fadd/fmul using a shuffled mix of %vec element 3 and %val, and one vector store.
define void @foo(<4 x float> %vec, float %val, ptr %ptr) {
; CHECK-LABEL: define void @foo
; CHECK-SAME: (<4 x float> [[VEC:%.*]], float [[VAL:%.*]], ptr [[PTR:%.*]]) {
; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr inbounds float, ptr [[PTR]], i64 0
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[GEP0]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[VEC]], <4 x float> poison, <2 x i32> <i32 3, i32 poison>
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x float> [[TMP2]], float [[VAL]], i32 1
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
; CHECK-NEXT:    [[TMP5:%.*]] = fadd <4 x float> [[TMP1]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = fmul <4 x float> [[TMP5]], [[TMP4]]
; CHECK-NEXT:    store <4 x float> [[TMP6]], ptr [[GEP0]], align 4
; CHECK-NEXT:    ret void
;
  %vec_3 = extractelement <4 x float> %vec, i32 3

  %gep0 = getelementptr inbounds float, ptr %ptr, i64 0
  %gep1 = getelementptr inbounds float, ptr %ptr, i64 1
  %gep2 = getelementptr inbounds float, ptr %ptr, i64 2
  %gep3 = getelementptr inbounds float, ptr %ptr, i64 3

  %l0 = load float, ptr %gep0, align 8
  %l1 = load float, ptr %gep1, align 8
  %l2 = load float, ptr %gep2, align 8
  %l3 = load float, ptr %gep3, align 8

  %fadd0 = fadd float %l0, %vec_3
  %fadd1 = fadd float %l1, %vec_3
  %fadd2 = fadd float %l2, %val
  %fadd3 = fadd float %l3, %val

  %fmul0 = fmul float %fadd0, %vec_3
  %fmul1 = fmul float %fadd1, %vec_3
  %fmul2 = fmul float %fadd2, %val
  %fmul3 = fmul float %fadd3, %val

  store float %fmul0, ptr %gep0, align 4
  store float %fmul1, ptr %gep1, align 4
  store float %fmul2, ptr %gep2, align 4
  store float %fmul3, ptr %gep3, align 4
  ret void
}