; xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/X86/store-insertelement-minbitwidth.ll (revision 31eaf86a1e8f1870e6ee4c42088a5213bde294b8)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S -mtriple=x86_64-unknown -passes=slp-vectorizer -mattr=+avx -pass-remarks-output=%t | FileCheck %s
; RUN: FileCheck --input-file=%t --check-prefix=YAML %s

; YAML-LABEL: --- !Passed
; YAML-NEXT:  Pass:            slp-vectorizer
; YAML-NEXT:  Name:            StoresVectorized
; YAML-NEXT:  Function:        stores
; YAML-NEXT:  Args:
; YAML-NEXT:    - String:          'Stores SLP vectorized with cost '
; YAML-NEXT:    - Cost:            '-7'
; YAML-NEXT:    - String:          ' and with tree size '
; YAML-NEXT:    - TreeSize:        '6'
; Four scalar i8 loads from each input are zext'd to i64, added pairwise, and
; stored; the CHECK lines verify SLP vectorizes this with the add narrowed to
; the minimum bitwidth <4 x i16>, zext'ing to <4 x i64> only for the store.
define void @stores(ptr noalias %in, ptr noalias %inn, ptr noalias %out) {
; CHECK-LABEL: @stores(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[IN:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[INN:%.*]], align 1
; CHECK-NEXT:    [[TMP3:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i16>
; CHECK-NEXT:    [[TMP4:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i16>
; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i16> [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = zext <4 x i16> [[TMP5]] to <4 x i64>
; CHECK-NEXT:    store <4 x i64> [[TMP6]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %load.1 = load i8, ptr %in, align 1
  %gep.1 = getelementptr inbounds i8, ptr %in, i64 1
  %load.2 = load i8, ptr %gep.1, align 1
  %gep.2 = getelementptr inbounds i8, ptr %in, i64 2
  %load.3 = load i8, ptr %gep.2, align 1
  %gep.3 = getelementptr inbounds i8, ptr %in, i64 3
  %load.4 = load i8, ptr %gep.3, align 1
  %load.5 = load i8, ptr %inn, align 1
  %gep.4 = getelementptr inbounds i8, ptr %inn, i64 1
  %load.6 = load i8, ptr %gep.4, align 1
  %gep.5 = getelementptr inbounds i8, ptr %inn, i64 2
  %load.7 = load i8, ptr %gep.5, align 1
  %gep.6 = getelementptr inbounds i8, ptr %inn, i64 3
  %load.8 = load i8, ptr %gep.6, align 1
  %z1 = zext i8 %load.1 to i64
  %z2 = zext i8 %load.2 to i64
  %z3 = zext i8 %load.3 to i64
  %z4 = zext i8 %load.4 to i64
  %z5 = zext i8 %load.5 to i64
  %z6 = zext i8 %load.6 to i64
  %z7 = zext i8 %load.7 to i64
  %z8 = zext i8 %load.8 to i64
  %add1 = add i64 %z1, %z5
  %add2 = add i64 %z2, %z6
  %add3 = add i64 %z3, %z7
  %add4 = add i64 %z4, %z8
  %gep.8 = getelementptr inbounds i64, ptr %out, i64 1
  %gep.9 = getelementptr inbounds i64, ptr %out, i64 2
  %gep.10 = getelementptr inbounds i64, ptr %out, i64 3
  store i64 %add1, ptr %out, align 4
  store i64 %add2, ptr %gep.8, align 4
  store i64 %add3, ptr %gep.9, align 4
  store i64 %add4, ptr %gep.10, align 4
  ret void
}

; YAML-LABEL: --- !Passed
; YAML-NEXT:  Pass:            slp-vectorizer
; YAML-NEXT:  Name:            VectorizedList
; YAML-NEXT:  Function:        insertelems
; YAML-NEXT:  Args:
; YAML-NEXT:    - String:          'SLP vectorized with cost '
; YAML-NEXT:    - Cost:            '-9'
; YAML-NEXT:    - String:          ' and with tree size '
; YAML-NEXT:    - TreeSize:        '6'
; Same pattern as @stores but the four i64 adds feed an insertelement chain
; (a <4 x i64> build-vector) instead of stores; the CHECK lines verify the
; add is still narrowed to <4 x i16> with a single final zext to <4 x i64>.
define <4 x i64> @insertelems(ptr noalias %in, ptr noalias %inn) {
; CHECK-LABEL: @insertelems(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[IN:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[INN:%.*]], align 1
; CHECK-NEXT:    [[TMP3:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i16>
; CHECK-NEXT:    [[TMP4:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i16>
; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i16> [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = zext <4 x i16> [[TMP5]] to <4 x i64>
; CHECK-NEXT:    ret <4 x i64> [[TMP6]]
;
  %load.1 = load i8, ptr %in, align 1
  %gep.1 = getelementptr inbounds i8, ptr %in, i64 1
  %load.2 = load i8, ptr %gep.1, align 1
  %gep.2 = getelementptr inbounds i8, ptr %in, i64 2
  %load.3 = load i8, ptr %gep.2, align 1
  %gep.3 = getelementptr inbounds i8, ptr %in, i64 3
  %load.4 = load i8, ptr %gep.3, align 1
  %load.5 = load i8, ptr %inn, align 1
  %gep.4 = getelementptr inbounds i8, ptr %inn, i64 1
  %load.6 = load i8, ptr %gep.4, align 1
  %gep.5 = getelementptr inbounds i8, ptr %inn, i64 2
  %load.7 = load i8, ptr %gep.5, align 1
  %gep.6 = getelementptr inbounds i8, ptr %inn, i64 3
  %load.8 = load i8, ptr %gep.6, align 1
  %z1 = zext i8 %load.1 to i64
  %z2 = zext i8 %load.2 to i64
  %z3 = zext i8 %load.3 to i64
  %z4 = zext i8 %load.4 to i64
  %z5 = zext i8 %load.5 to i64
  %z6 = zext i8 %load.6 to i64
  %z7 = zext i8 %load.7 to i64
  %z8 = zext i8 %load.8 to i64
  %add1 = add i64 %z1, %z5
  %add2 = add i64 %z2, %z6
  %add3 = add i64 %z3, %z7
  %add4 = add i64 %z4, %z8
  %ins1 = insertelement <4 x i64> poison, i64 %add1, i32 0
  %ins2 = insertelement <4 x i64> %ins1, i64 %add2, i32 1
  %ins3 = insertelement <4 x i64> %ins2, i64 %add3, i32 2
  %ins4 = insertelement <4 x i64> %ins3, i64 %add4, i32 3
  ret <4 x i64> %ins4
}