xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/X86/store-jumbled.ll (revision 580210a0c938531ef9fd79f9ffedb93eeb2e66c2)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -passes=slp-vectorizer | FileCheck %s



; Verify that the SLP vectorizer handles stores written in a "jumbled"
; (non-sequential) order: the four scalar stores below hit out[2], out[0],
; out[3], out[1], so the vectorizer must emit a single <4 x i32> store fed
; by a shufflevector with mask <1, 3, 0, 2> to restore memory order.
define i32 @jumbled-load(ptr noalias nocapture %in, ptr noalias nocapture %inn, ptr noalias nocapture %out) {
; CHECK-LABEL: @jumbled-load(
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[IN:%.*]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr [[INN:%.*]], align 4
; CHECK-NEXT:    [[TMP5:%.*]] = mul <4 x i32> [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> poison, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
; CHECK-NEXT:    store <4 x i32> [[SHUFFLE]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT:    ret i32 undef
;
  %load.1 = load i32, ptr %in, align 4
  %gep.1 = getelementptr inbounds i32, ptr %in, i64 1
  %load.2 = load i32, ptr %gep.1, align 4
  %gep.2 = getelementptr inbounds i32, ptr %in, i64 2
  %load.3 = load i32, ptr %gep.2, align 4
  %gep.3 = getelementptr inbounds i32, ptr %in, i64 3
  %load.4 = load i32, ptr %gep.3, align 4
  %load.5 = load i32, ptr %inn, align 4
  %gep.4 = getelementptr inbounds i32, ptr %inn, i64 1
  %load.6 = load i32, ptr %gep.4, align 4
  %gep.5 = getelementptr inbounds i32, ptr %inn, i64 2
  %load.7 = load i32, ptr %gep.5, align 4
  %gep.6 = getelementptr inbounds i32, ptr %inn, i64 3
  %load.8 = load i32, ptr %gep.6, align 4
  %mul.1 = mul i32 %load.1, %load.5
  %mul.2 = mul i32 %load.2, %load.6
  %mul.3 = mul i32 %load.3, %load.7
  %mul.4 = mul i32 %load.4, %load.8
  %gep.8 = getelementptr inbounds i32, ptr %out, i64 1
  %gep.9 = getelementptr inbounds i32, ptr %out, i64 2
  %gep.10 = getelementptr inbounds i32, ptr %out, i64 3
  ; Stores are deliberately out of order: out[2], out[0], out[3], out[1].
  store i32 %mul.1, ptr %gep.9, align 4
  store i32 %mul.2, ptr %out, align 4
  store i32 %mul.3, ptr %gep.10, align 4
  store i32 %mul.4, ptr %gep.8, align 4

  ret i32 undef
}