; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -mtriple=x86_64-unknown-unknown -S | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

; Verify that instcombine is able to fold identity shuffles.
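; With the identity mask <0,1,2,3,4,5,6,7> every output lane selects the
; corresponding input lane, so the intrinsic call is expected to fold away
; and the data operand is returned unchanged (see the CHECK lines below).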

define <8 x i32> @identity_test_vpermd(<8 x i32> %a0) {
; CHECK-LABEL: @identity_test_vpermd(
; CHECK-NEXT:    ret <8 x i32> [[A0:%.*]]
;
  %a = tail call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>)
  ret <8 x i32> %a
}

define <8 x float> @identity_test_vpermps(<8 x float> %a0) {
; CHECK-LABEL: @identity_test_vpermps(
; CHECK-NEXT:    ret <8 x float> [[A0:%.*]]
;
  %a = tail call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>)
  ret <8 x float> %a
}

; Instcombine should be able to fold the following shuffle to a builtin shufflevector
; with a shuffle mask of all zeroes.
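; A zeroinitializer index vector broadcasts lane 0, which maps onto a splat
; shufflevector, e.g. (a sketch of the expected replacement):
;   %a = shufflevector <8 x i32> %a0, <8 x i32> poison, <8 x i32> zeroinitializer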

define <8 x i32> @zero_test_vpermd(<8 x i32> %a0) {
; CHECK-LABEL: @zero_test_vpermd(
; CHECK-NEXT:    [[A:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> poison, <8 x i32> zeroinitializer
; CHECK-NEXT:    ret <8 x i32> [[A]]
;
  %a = tail call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> zeroinitializer)
  ret <8 x i32> %a
}

define <8 x float> @zero_test_vpermps(<8 x float> %a0) {
; CHECK-LABEL: @zero_test_vpermps(
; CHECK-NEXT:    [[A:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> poison, <8 x i32> zeroinitializer
; CHECK-NEXT:    ret <8 x float> [[A]]
;
  %a = tail call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> zeroinitializer)
  ret <8 x float> %a
}

; Verify that instcombine is able to fold constant shuffles.
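; A fully-constant index vector can be translated element-by-element into a
; shufflevector mask; the reversal mask <7,6,5,4,3,2,1,0> below should become
; a shufflevector with the same constant mask.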

define <8 x i32> @shuffle_test_vpermd(<8 x i32> %a0) {
; CHECK-LABEL: @shuffle_test_vpermd(
; CHECK-NEXT:    [[A:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    ret <8 x i32> [[A]]
;
  %a = tail call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x i32> %a
}

define <8 x float> @shuffle_test_vpermps(<8 x float> %a0) {
; CHECK-LABEL: @shuffle_test_vpermps(
; CHECK-NEXT:    [[A:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    ret <8 x float> [[A]]
;
  %a = tail call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %a
}

; Verify that instcombine is able to fold constant shuffles with undef mask elements.
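; Undef indices do not pin down a source lane, so they are expected to become
; poison lanes in the resulting shufflevector mask.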

define <8 x i32> @undef_test_vpermd(<8 x i32> %a0) {
; CHECK-LABEL: @undef_test_vpermd(
; CHECK-NEXT:    [[A:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> poison, <8 x i32> <i32 poison, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    ret <8 x i32> [[A]]
;
  %a = tail call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x i32> %a
}

define <8 x float> @undef_test_vpermps(<8 x float> %a0) {
; CHECK-LABEL: @undef_test_vpermps(
; CHECK-NEXT:    [[A:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> poison, <8 x i32> <i32 poison, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    ret <8 x float> [[A]]
;
  %a = tail call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %a
}

; Verify simplify demanded elts.
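; In each test below the trailing shufflevector only demands a subset of the
; result lanes, so an insertelement that only feeds lanes which are never read
; is expected to be dropped.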

define <8 x i32> @elts_test_vpermd(<8 x i32> %a0, i32 %a1) {
; CHECK-LABEL: @elts_test_vpermd(
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> poison, <8 x i32> <i32 poison, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    ret <8 x i32> [[TMP1]]
;
  %1 = insertelement <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, i32 %a1, i32 0
  %2 = tail call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %1)
  %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> <i32 undef, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %3
}

define <8 x float> @elts_test_vpermps(<8 x float> %a0, <8 x i32> %a1) {
; CHECK-LABEL: @elts_test_vpermps(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x float> @llvm.x86.avx2.permps(<8 x float> [[A0:%.*]], <8 x i32> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> poison, <8 x i32> zeroinitializer
; CHECK-NEXT:    ret <8 x float> [[TMP2]]
;
  %1 = insertelement <8 x i32> %a1, i32 0, i32 7
  %2 = tail call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> %1)
  %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> zeroinitializer
  ret <8 x float> %3
}

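; The same demanded-elements reasoning applies to the AVX2 variable shifts:
; only lane 0 of each shift result is demanded (it is splatted), so an
; insertelement into any other lane of the shift-amount vector is expected to
; be removed while the intrinsic call itself is kept.
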
define <2 x i64> @elts_test_vpsllvq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: @elts_test_vpsllvq(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
;
  %1 = insertelement <2 x i64> %a1, i64 0, i64 1
  %2 = tail call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %1)
  %3 = shufflevector <2 x i64> %2, <2 x i64> undef, <2 x i32> zeroinitializer
  ret <2 x i64> %3
}

define <2 x i64> @elts_test_vpsrlvq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: @elts_test_vpsrlvq(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
;
  %1 = insertelement <2 x i64> %a1, i64 0, i64 1
  %2 = tail call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %1)
  %3 = shufflevector <2 x i64> %2, <2 x i64> undef, <2 x i32> zeroinitializer
  ret <2 x i64> %3
}

define <4 x i64> @elts_test_vpsllvq_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: @elts_test_vpsllvq_256(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    ret <4 x i64> [[TMP2]]
;
  %1 = insertelement <4 x i64> %a1, i64 0, i64 2
  %2 = tail call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %1)
  %3 = shufflevector <4 x i64> %2, <4 x i64> undef, <4 x i32> zeroinitializer
  ret <4 x i64> %3
}

define <4 x i64> @elts_test_vpsrlvq_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: @elts_test_vpsrlvq_256(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    ret <4 x i64> [[TMP2]]
;
  %1 = insertelement <4 x i64> %a1, i64 0, i64 3
  %2 = tail call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %1)
  %3 = shufflevector <4 x i64> %2, <4 x i64> undef, <4 x i32> zeroinitializer
  ret <4 x i64> %3
}

define <4 x i32> @elts_test_vpsllvd(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: @elts_test_vpsllvd(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> [[A0:%.*]], <4 x i32> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = insertelement <4 x i32> %a1, i32 0, i64 3
  %2 = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %1)
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %3
}

define <4 x i32> @elts_test_vpsravd(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: @elts_test_vpsravd(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> [[A0:%.*]], <4 x i32> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = insertelement <4 x i32> %a1, i32 0, i64 1
  %2 = tail call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %1)
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %3
}

define <4 x i32> @elts_test_vpsrlvd(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: @elts_test_vpsrlvd(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> [[A0:%.*]], <4 x i32> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = insertelement <4 x i32> %a1, i32 0, i64 2
  %2 = tail call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %1)
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %3
}

define <8 x i32> @elts_test_vpsllvd_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: @elts_test_vpsllvd_256(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <8 x i32> zeroinitializer
; CHECK-NEXT:    ret <8 x i32> [[TMP2]]
;
  %1 = insertelement <8 x i32> %a1, i32 0, i64 3
  %2 = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %1)
  %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> zeroinitializer
  ret <8 x i32> %3
}

define <8 x i32> @elts_test_vpsravd_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: @elts_test_vpsravd_256(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <8 x i32> zeroinitializer
; CHECK-NEXT:    ret <8 x i32> [[TMP2]]
;
  %1 = insertelement <8 x i32> %a1, i32 0, i64 4
  %2 = tail call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %1)
  %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> zeroinitializer
  ret <8 x i32> %3
}

define <8 x i32> @elts_test_vpsrlvd_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: @elts_test_vpsrlvd_256(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <8 x i32> zeroinitializer
; CHECK-NEXT:    ret <8 x i32> [[TMP2]]
;
  %1 = insertelement <8 x i32> %a1, i32 0, i64 5
  %2 = tail call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %1)
  %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> zeroinitializer
  ret <8 x i32> %3
}

declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>)
declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>)

declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>)

declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>)
declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>)

declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>)

declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>)
declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>)
declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>)