; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=slp-vectorizer -slp-vectorize-hor -slp-vectorize-hor-store -S < %s -mtriple=x86_64-apple-macosx -mcpu=haswell | FileCheck %s --check-prefixes=CHECK,AVX2
; RUN: opt -passes=slp-vectorizer -slp-vectorize-hor -slp-vectorize-hor-store -S < %s -mtriple=x86_64-apple-macosx -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,AVX512

;unsigned load_le32(unsigned char *data) {
;    unsigned le32 = (data[0]<<0) | (data[1]<<8) | (data[2]<<16) | (data[3]<<24);
;    return le32;
;}

define i32 @_Z9load_le32Ph(ptr nocapture readonly %data) {
; CHECK-LABEL: @_Z9load_le32Ph(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[DATA:%.*]], align 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[TMP0]] to i32
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DATA]], i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
; CHECK-NEXT:    [[SHL3:%.*]] = shl nuw nsw i32 [[CONV2]], 8
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL3]], [[CONV]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[DATA]], i64 2
; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX4]], align 1
; CHECK-NEXT:    [[CONV5:%.*]] = zext i8 [[TMP2]] to i32
; CHECK-NEXT:    [[SHL6:%.*]] = shl nuw nsw i32 [[CONV5]], 16
; CHECK-NEXT:    [[OR7:%.*]] = or i32 [[OR]], [[SHL6]]
; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[DATA]], i64 3
; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1
; CHECK-NEXT:    [[CONV9:%.*]] = zext i8 [[TMP3]] to i32
; CHECK-NEXT:    [[SHL10:%.*]] = shl nuw i32 [[CONV9]], 24
; CHECK-NEXT:    [[OR11:%.*]] = or i32 [[OR7]], [[SHL10]]
; CHECK-NEXT:    ret i32 [[OR11]]
;
entry:
  %0 = load i8, ptr %data, align 1
  %conv = zext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8, ptr %data, i64 1
  %1 = load i8, ptr %arrayidx1, align 1
  %conv2 = zext i8 %1 to i32
  %shl3 = shl nuw nsw i32 %conv2, 8
  %or = or i32 %shl3, %conv
  %arrayidx4 = getelementptr inbounds i8, ptr %data, i64 2
  %2 = load i8, ptr %arrayidx4, align 1
  %conv5 = zext i8 %2 to i32
  %shl6 = shl nuw nsw i32 %conv5, 16
  %or7 = or i32 %or, %shl6
  %arrayidx8 = getelementptr inbounds i8, ptr %data, i64 3
  %3 = load i8, ptr %arrayidx8, align 1
  %conv9 = zext i8 %3 to i32
  %shl10 = shl nuw i32 %conv9, 24
  %or11 = or i32 %or7, %shl10
  ret i32 %or11
}

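; The next test builds <x[0], x[1], x[2], x[2]> from a 16-byte dereferenceable
; buffer. A rough C rendering of that pattern, reconstructed from the IR below
; rather than taken from the original PR16739 report (the float-pointer
; signature and the use of _mm_setr_ps are illustrative assumptions):
;
;   #include <xmmintrin.h>
;   __m128 PR16739_byref(const float *x) {        /* x has 16 readable bytes */
;     return _mm_setr_ps(x[0], x[1], x[2], x[2]); /* lanes: x0, x1, x2, x2 */
;   }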
define <4 x float> @PR16739_byref(ptr nocapture readonly dereferenceable(16) %x) {
; AVX2-LABEL: @PR16739_byref(
; AVX2-NEXT:    [[GEP2:%.*]] = getelementptr inbounds <4 x float>, ptr [[X:%.*]], i64 0, i64 2
; AVX2-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[X]], align 4
; AVX2-NEXT:    [[X2:%.*]] = load float, ptr [[GEP2]], align 4
; AVX2-NEXT:    [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; AVX2-NEXT:    [[I2:%.*]] = insertelement <4 x float> [[TMP2]], float [[X2]], i32 2
; AVX2-NEXT:    [[I3:%.*]] = insertelement <4 x float> [[I2]], float [[X2]], i32 3
; AVX2-NEXT:    ret <4 x float> [[I3]]
;
; AVX512-LABEL: @PR16739_byref(
; AVX512-NEXT:    [[GEP1:%.*]] = getelementptr inbounds <4 x float>, ptr [[X:%.*]], i64 0, i64 1
; AVX512-NEXT:    [[X0:%.*]] = load float, ptr [[X]], align 4
; AVX512-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[GEP1]], align 4
; AVX512-NEXT:    [[I0:%.*]] = insertelement <4 x float> undef, float [[X0]], i32 0
; AVX512-NEXT:    [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; AVX512-NEXT:    [[I21:%.*]] = shufflevector <4 x float> [[I0]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 4, i32 5, i32 3>
; AVX512-NEXT:    [[TMP3:%.*]] = shufflevector <4 x float> [[I21]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 2, i32 5>
; AVX512-NEXT:    ret <4 x float> [[TMP3]]
;
  %gep1 = getelementptr inbounds <4 x float>, ptr %x, i64 0, i64 1
  %gep2 = getelementptr inbounds <4 x float>, ptr %x, i64 0, i64 2
  %x0 = load float, ptr %x
  %x1 = load float, ptr %gep1
  %x2 = load float, ptr %gep2
  %i0 = insertelement <4 x float> undef, float %x0, i32 0
  %i1 = insertelement <4 x float> %i0, float %x1, i32 1
  %i2 = insertelement <4 x float> %i1, float %x2, i32 2
  %i3 = insertelement <4 x float> %i2, float %x2, i32 3
  ret <4 x float> %i3
}

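; The "_alt" variant broadcasts the first two floats instead:
; <x[0], x[0], x[1], x[1]>. An equivalent C sketch under the same illustrative
; assumptions as above (not the original reproducer):
;
;   __m128 PR16739_byref_alt(const float *x) {
;     return _mm_setr_ps(x[0], x[0], x[1], x[1]);
;   }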
define <4 x float> @PR16739_byref_alt(ptr nocapture readonly dereferenceable(16) %x) {
; CHECK-LABEL: @PR16739_byref_alt(
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[X:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
; CHECK-NEXT:    ret <4 x float> [[TMP2]]
;
  %gep1 = getelementptr inbounds <4 x float>, ptr %x, i64 0, i64 1
  %x0 = load float, ptr %x
  %x1 = load float, ptr %gep1
  %i0 = insertelement <4 x float> undef, float %x0, i32 0
  %i1 = insertelement <4 x float> %i0, float %x0, i32 1
  %i2 = insertelement <4 x float> %i1, float %x1, i32 2
  %i3 = insertelement <4 x float> %i2, float %x1, i32 3
  ret <4 x float> %i3
}

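; The byval test loads the same 16 bytes as two i64 values and re-extracts the
; 32-bit float lanes with shifts, truncs, and bitcasts. A rough C sketch of
; that pattern, inferred from the IR below (memcpy stands in for the bitcasts;
; the original PR16739 source may differ):
;
;   #include <stdint.h>
;   #include <string.h>
;   #include <xmmintrin.h>
;   __m128 PR16739_byval(const void *x) {
;     uint64_t lo, hi;
;     memcpy(&lo, x, 8);                    /* bytes 0..7  */
;     memcpy(&hi, (const char *)x + 8, 8);  /* bytes 8..15 */
;     uint32_t w0 = (uint32_t)lo, w1 = (uint32_t)(lo >> 32), w2 = (uint32_t)hi;
;     float f0, f1, f2;
;     memcpy(&f0, &w0, 4); memcpy(&f1, &w1, 4); memcpy(&f2, &w2, 4);
;     return _mm_setr_ps(f0, f1, f2, f2);   /* lane 3 repeats the third float */
;   }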
define <4 x float> @PR16739_byval(ptr nocapture readonly dereferenceable(16) %x) {
; CHECK-LABEL: @PR16739_byval(
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[X:%.*]], align 16
; CHECK-NEXT:    [[T1:%.*]] = load i64, ptr [[X]], align 16
; CHECK-NEXT:    [[T8:%.*]] = lshr i64 [[T1]], 32
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <4 x i32> <i32 0, i32 poison, i32 1, i32 1>
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i64> [[TMP2]], i64 [[T8]], i32 1
; CHECK-NEXT:    [[TMP4:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i32>
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <4 x float>
; CHECK-NEXT:    ret <4 x float> [[TMP5]]
;
  %t1 = load i64, ptr %x, align 16
  %t2 = getelementptr inbounds <4 x float>, ptr %x, i64 0, i64 2
  %t4 = load i64, ptr %t2, align 8
  %t5 = trunc i64 %t1 to i32
  %t6 = bitcast i32 %t5 to float
  %t7 = insertelement <4 x float> undef, float %t6, i32 0
  %t8 = lshr i64 %t1, 32
  %t9 = trunc i64 %t8 to i32
  %t10 = bitcast i32 %t9 to float
  %t11 = insertelement <4 x float> %t7, float %t10, i32 1
  %t12 = trunc i64 %t4 to i32
  %t13 = bitcast i32 %t12 to float
  %t14 = insertelement <4 x float> %t11, float %t13, i32 2
  %t15 = insertelement <4 x float> %t14, float %t13, i32 3
  ret <4 x float> %t15
}

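; PR43578: with "prefer-vector-width"="128" the expected output below is two
; <2 x i64> subtractions rather than one wider operation. A rough C rendering
; of the input pattern, inferred from the IR (the computed addresses are
; intentionally unused; the original reproducer may differ):
;
;   #include <stdint.h>
;   void PR43578_prefer128(int32_t *r, const int64_t *p, const int64_t *q) {
;     int32_t *g0 = r + (p[0] - q[0]);
;     int32_t *g1 = r + (p[1] - q[1]);
;     int32_t *g2 = r + (p[2] - q[2]);
;     int32_t *g3 = r + (p[3] - q[3]);
;     (void)g0; (void)g1; (void)g2; (void)g3;  /* addresses formed, never used */
;   }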
define void @PR43578_prefer128(ptr %r, ptr %p, ptr %q) #0 {
; CHECK-LABEL: @PR43578_prefer128(
; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 2
; CHECK-NEXT:    [[Q2:%.*]] = getelementptr inbounds i64, ptr [[Q:%.*]], i64 2
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[P]], align 2
; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[Q]], align 2
; CHECK-NEXT:    [[TMP3:%.*]] = sub nsw <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, ptr [[P2]], align 2
; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr [[Q2]], align 2
; CHECK-NEXT:    [[TMP6:%.*]] = sub nsw <2 x i64> [[TMP4]], [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x i64> [[TMP3]], i32 0
; CHECK-NEXT:    [[G0:%.*]] = getelementptr inbounds i32, ptr [[R:%.*]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x i64> [[TMP3]], i32 1
; CHECK-NEXT:    [[G1:%.*]] = getelementptr inbounds i32, ptr [[R]], i64 [[TMP8]]
; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x i64> [[TMP6]], i32 0
; CHECK-NEXT:    [[G2:%.*]] = getelementptr inbounds i32, ptr [[R]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <2 x i64> [[TMP6]], i32 1
; CHECK-NEXT:    [[G3:%.*]] = getelementptr inbounds i32, ptr [[R]], i64 [[TMP10]]
; CHECK-NEXT:    ret void
;
  %p1 = getelementptr inbounds i64, ptr %p, i64 1
  %p2 = getelementptr inbounds i64, ptr %p, i64 2
  %p3 = getelementptr inbounds i64, ptr %p, i64 3

  %q1 = getelementptr inbounds i64, ptr %q, i64 1
  %q2 = getelementptr inbounds i64, ptr %q, i64 2
  %q3 = getelementptr inbounds i64, ptr %q, i64 3

  %x0 = load i64, ptr %p, align 2
  %x1 = load i64, ptr %p1, align 2
  %x2 = load i64, ptr %p2, align 2
  %x3 = load i64, ptr %p3, align 2

  %y0 = load i64, ptr %q, align 2
  %y1 = load i64, ptr %q1, align 2
  %y2 = load i64, ptr %q2, align 2
  %y3 = load i64, ptr %q3, align 2

  %sub0 = sub nsw i64 %x0, %y0
  %sub1 = sub nsw i64 %x1, %y1
  %sub2 = sub nsw i64 %x2, %y2
  %sub3 = sub nsw i64 %x3, %y3

  %g0 = getelementptr inbounds i32, ptr %r, i64 %sub0
  %g1 = getelementptr inbounds i32, ptr %r, i64 %sub1
  %g2 = getelementptr inbounds i32, ptr %r, i64 %sub2
  %g3 = getelementptr inbounds i32, ptr %r, i64 %sub3
  ret void
}

attributes #0 = { "prefer-vector-width"="128" }