; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: %if x86-registered-target %{ opt -S -p slp-vectorizer -mtriple=x86_64-- %s | FileCheck %s %}
; RUN: %if aarch64-registered-target %{ opt -S -p slp-vectorizer -mtriple=aarch64-unknown-linux-gnu %s | FileCheck %s %}
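
; Test that the SLP vectorizer turns the four scalar load/fadd/fmul/store
; chains into <4 x float> operations, building the shared second operand from
; lane 3 of %vec and from %val via a <2 x float> shuffle that is then widened.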

define void @foo(<4 x float> %vec, float %val, ptr %ptr) {
; CHECK-LABEL: define void @foo
; CHECK-SAME: (<4 x float> [[VEC:%.*]], float [[VAL:%.*]], ptr [[PTR:%.*]]) {
; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr inbounds float, ptr [[PTR]], i64 0
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[GEP0]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[VEC]], <4 x float> poison, <2 x i32> <i32 3, i32 poison>
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x float> [[TMP2]], float [[VAL]], i32 1
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
; CHECK-NEXT:    [[TMP5:%.*]] = fadd <4 x float> [[TMP1]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = fmul <4 x float> [[TMP5]], [[TMP4]]
; CHECK-NEXT:    store <4 x float> [[TMP6]], ptr [[GEP0]], align 4
; CHECK-NEXT:    ret void
;
  %vec_3 = extractelement <4 x float> %vec, i32 3

  %gep0 = getelementptr inbounds float, ptr %ptr, i64 0
  %gep1 = getelementptr inbounds float, ptr %ptr, i64 1
  %gep2 = getelementptr inbounds float, ptr %ptr, i64 2
  %gep3 = getelementptr inbounds float, ptr %ptr, i64 3

  %l0 = load float, ptr %gep0, align 8
  %l1 = load float, ptr %gep1, align 8
  %l2 = load float, ptr %gep2, align 8
  %l3 = load float, ptr %gep3, align 8

  %fadd0 = fadd float %l0, %vec_3
  %fadd1 = fadd float %l1, %vec_3
  %fadd2 = fadd float %l2, %val
  %fadd3 = fadd float %l3, %val

  %fmul0 = fmul float %fadd0, %vec_3
  %fmul1 = fmul float %fadd1, %vec_3
  %fmul2 = fmul float %fadd2, %val
  %fmul3 = fmul float %fadd3, %val

  store float %fmul0, ptr %gep0, align 4
  store float %fmul1, ptr %gep1, align 4
  store float %fmul2, ptr %gep2, align 4
  store float %fmul3, ptr %gep3, align 4
  ret void
}