xref: /llvm-project/llvm/test/Transforms/InstCombine/icmp-and-shift.ll (revision 38fffa630ee80163dc65e759392ad29798905679)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -passes=instcombine -S | FileCheck %s
3
4declare void @use(i8)
5
;; (1 << X) & 16 == 0  ==>  X != 4
;; Both the shifted value and the mask are single set bits, so the and+cmp
;; reduces to a direct compare of the shift amount against log2(16).
6define i32 @icmp_eq_and_pow2_shl1(i32 %0) {
7; CHECK-LABEL: @icmp_eq_and_pow2_shl1(
8; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP0:%.*]], 4
9; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[TMP2]] to i32
10; CHECK-NEXT:    ret i32 [[CONV]]
11;
12  %shl = shl i32 1, %0
13  %and = and i32 %shl, 16
14  %cmp = icmp eq i32 %and, 0
15  %conv = zext i1 %cmp to i32
16  ret i32 %conv
17}
18
;; Same fold applied elementwise to splat vectors.
19define <2 x i32> @icmp_eq_and_pow2_shl1_vec(<2 x i32> %0) {
20; CHECK-LABEL: @icmp_eq_and_pow2_shl1_vec(
21; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <2 x i32> [[TMP0:%.*]], splat (i32 4)
22; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
23; CHECK-NEXT:    ret <2 x i32> [[CONV]]
24;
25  %shl = shl <2 x i32> <i32 1, i32 1>, %0
26  %and = and <2 x i32> %shl, <i32 16, i32 16>
27  %cmp = icmp eq <2 x i32> %and, <i32 0, i32 0>
28  %conv = zext <2 x i1> %cmp to <2 x i32>
29  ret <2 x i32> %conv
30}
31
;; ne variant of the fold above: (1 << X) & 16 != 0  ==>  X == 4.
32define i32 @icmp_ne_and_pow2_shl1(i32 %0) {
33; CHECK-LABEL: @icmp_ne_and_pow2_shl1(
34; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP0:%.*]], 4
35; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[TMP2]] to i32
36; CHECK-NEXT:    ret i32 [[CONV]]
37;
38  %shl = shl i32 1, %0
39  %and = and i32 %shl, 16
40  %cmp = icmp ne i32 %and, 0
41  %conv = zext i1 %cmp to i32
42  ret i32 %conv
43}
44
;; Vector (splat) version of the ne fold.
45define <2 x i32> @icmp_ne_and_pow2_shl1_vec(<2 x i32> %0) {
46; CHECK-LABEL: @icmp_ne_and_pow2_shl1_vec(
47; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP0:%.*]], splat (i32 4)
48; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
49; CHECK-NEXT:    ret <2 x i32> [[CONV]]
50;
51  %shl = shl <2 x i32> <i32 1, i32 1>, %0
52  %and = and <2 x i32> %shl, <i32 16, i32 16>
53  %cmp = icmp ne <2 x i32> %and, <i32 0, i32 0>
54  %conv = zext <2 x i1> %cmp to <2 x i32>
55  ret <2 x i32> %conv
56}
57
;; Shifted constant is a power of 2 greater than 1: (2 << X) & 16 == 0 folds
;; to X != log2(16) - log2(2) = 3 (the vec test uses 4 << X, giving 2).
58define i32 @icmp_eq_and_pow2_shl_pow2(i32 %0) {
59; CHECK-LABEL: @icmp_eq_and_pow2_shl_pow2(
60; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP0:%.*]], 3
61; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[TMP2]] to i32
62; CHECK-NEXT:    ret i32 [[CONV]]
63;
64  %shl = shl i32 2, %0
65  %and = and i32 %shl, 16
66  %cmp = icmp eq i32 %and, 0
67  %conv = zext i1 %cmp to i32
68  ret i32 %conv
69}
70
71define <2 x i32> @icmp_eq_and_pow2_shl_pow2_vec(<2 x i32> %0) {
72; CHECK-LABEL: @icmp_eq_and_pow2_shl_pow2_vec(
73; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <2 x i32> [[TMP0:%.*]], splat (i32 2)
74; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
75; CHECK-NEXT:    ret <2 x i32> [[CONV]]
76;
77  %shl = shl <2 x i32> <i32 4, i32 4>, %0
78  %and = and <2 x i32> %shl, <i32 16, i32 16>
79  %cmp = icmp eq <2 x i32> %and, <i32 0, i32 0>
80  %conv = zext <2 x i1> %cmp to <2 x i32>
81  ret <2 x i32> %conv
82}
83
;; ne variant: (2 << X) & 16 != 0 folds to X == 3 (vec: X == 2).
84define i32 @icmp_ne_and_pow2_shl_pow2(i32 %0) {
85; CHECK-LABEL: @icmp_ne_and_pow2_shl_pow2(
86; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP0:%.*]], 3
87; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[TMP2]] to i32
88; CHECK-NEXT:    ret i32 [[CONV]]
89;
90  %shl = shl i32 2, %0
91  %and = and i32 %shl, 16
92  %cmp = icmp ne i32 %and, 0
93  %conv = zext i1 %cmp to i32
94  ret i32 %conv
95}
96
97define <2 x i32> @icmp_ne_and_pow2_shl_pow2_vec(<2 x i32> %0) {
98; CHECK-LABEL: @icmp_ne_and_pow2_shl_pow2_vec(
99; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP0:%.*]], splat (i32 2)
100; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
101; CHECK-NEXT:    ret <2 x i32> [[CONV]]
102;
103  %shl = shl <2 x i32> <i32 4, i32 4>, %0
104  %and = and <2 x i32> %shl, <i32 16, i32 16>
105  %cmp = icmp ne <2 x i32> %and, <i32 0, i32 0>
106  %conv = zext <2 x i1> %cmp to <2 x i32>
107  ret <2 x i32> %conv
108}
109
;; Negative: 11 is not a power of 2, so the shift-amount fold does not apply;
;; instead the bit test is lowered to lshr + and 1 + xor.
110define i32 @icmp_eq_and_pow2_shl_pow2_negative1(i32 %0) {
111; CHECK-LABEL: @icmp_eq_and_pow2_shl_pow2_negative1(
112; CHECK-NEXT:    [[SHL:%.*]] = shl i32 11, [[TMP0:%.*]]
113; CHECK-NEXT:    [[AND:%.*]] = lshr i32 [[SHL]], 4
114; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and i32 [[AND]], 1
115; CHECK-NEXT:    [[CONV:%.*]] = xor i32 [[AND_LOBIT]], 1
116; CHECK-NEXT:    ret i32 [[CONV]]
117;
118  %shl = shl i32 11, %0
119  %and = and i32 %shl, 16
120  %cmp = icmp eq i32 %and, 0
121  %conv = zext i1 %cmp to i32
122  ret i32 %conv
123}
124
;; Negative: mask 14 is not a power of 2, so this becomes a range compare
;; on the shift amount rather than an equality.
125define i32 @icmp_eq_and_pow2_shl_pow2_negative2(i32 %0) {
126; CHECK-LABEL: @icmp_eq_and_pow2_shl_pow2_negative2(
127; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[TMP0:%.*]], 2
128; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
129; CHECK-NEXT:    ret i32 [[CONV]]
130;
131  %shl = shl i32 2, %0
132  %and = and i32 %shl, 14
133  %cmp = icmp eq i32 %and, 0
134  %conv = zext i1 %cmp to i32
135  ret i32 %conv
136}
137
;; Negative: 32 << X can never set bit 4, so the and is always 0 and the
;; whole expression folds to the constant 1.
138define i32 @icmp_eq_and_pow2_shl_pow2_negative3(i32 %0) {
139; CHECK-LABEL: @icmp_eq_and_pow2_shl_pow2_negative3(
140; CHECK-NEXT:    ret i32 1
141;
142  %shl = shl i32 32, %0
143  %and = and i32 %shl, 16
144  %cmp = icmp eq i32 %and, 0
145  %conv = zext i1 %cmp to i32
146  ret i32 %conv
147}
148
149
;; Mask is a power of 2 minus 1 (15 = low 4 bits): (1 << X) & 15 == 0 folds
;; to an unsigned range compare of the shift amount, X u> 3.
150define i32 @icmp_eq_and_pow2_minus1_shl1(i32 %0) {
151; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl1(
152; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[TMP0:%.*]], 3
153; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
154; CHECK-NEXT:    ret i32 [[CONV]]
155;
156  %shl = shl i32 1, %0
157  %and = and i32 %shl, 15
158  %cmp = icmp eq i32 %and, 0
159  %conv = zext i1 %cmp to i32
160  ret i32 %conv
161}
162
163define <2 x i32> @icmp_eq_and_pow2_minus1_shl1_vec(<2 x i32> %0) {
164; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl1_vec(
165; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i32> [[TMP0:%.*]], splat (i32 3)
166; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
167; CHECK-NEXT:    ret <2 x i32> [[CONV]]
168;
169  %shl = shl <2 x i32> <i32 1, i32 1>, %0
170  %and = and <2 x i32> %shl, <i32 15, i32 15>
171  %cmp = icmp eq <2 x i32> %and, <i32 0, i32 0>
172  %conv = zext <2 x i1> %cmp to <2 x i32>
173  ret <2 x i32> %conv
174}
175
;; ne variant: (1 << X) & 15 != 0 folds to X u< 4.
176define i32 @icmp_ne_and_pow2_minus1_shl1(i32 %0) {
177; CHECK-LABEL: @icmp_ne_and_pow2_minus1_shl1(
178; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[TMP0:%.*]], 4
179; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
180; CHECK-NEXT:    ret i32 [[CONV]]
181;
182  %shl = shl i32 1, %0
183  %and = and i32 %shl, 15
184  %cmp = icmp ne i32 %and, 0
185  %conv = zext i1 %cmp to i32
186  ret i32 %conv
187}
188
189define <2 x i32> @icmp_ne_and_pow2_minus1_shl1_vec(<2 x i32> %0) {
190; CHECK-LABEL: @icmp_ne_and_pow2_minus1_shl1_vec(
191; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[TMP0:%.*]], splat (i32 4)
192; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
193; CHECK-NEXT:    ret <2 x i32> [[CONV]]
194;
195  %shl = shl <2 x i32> <i32 1, i32 1>, %0
196  %and = and <2 x i32> %shl, <i32 15, i32 15>
197  %cmp = icmp ne <2 x i32> %and, <i32 0, i32 0>
198  %conv = zext <2 x i1> %cmp to <2 x i32>
199  ret <2 x i32> %conv
200}
201
;; Low-mask fold with a shifted power of 2: (2 << X) & 15 == 0 folds to
;; X u> 2 (the vec tests shift 4, giving X u> 1 / X u< 2).
202define i32 @icmp_eq_and_pow2_minus1_shl_pow2(i32 %0) {
203; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl_pow2(
204; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[TMP0:%.*]], 2
205; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
206; CHECK-NEXT:    ret i32 [[CONV]]
207;
208  %shl = shl i32 2, %0
209  %and = and i32 %shl, 15
210  %cmp = icmp eq i32 %and, 0
211  %conv = zext i1 %cmp to i32
212  ret i32 %conv
213}
214
215define <2 x i32> @icmp_eq_and_pow2_minus1_shl_pow2_vec(<2 x i32> %0) {
216; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl_pow2_vec(
217; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i32> [[TMP0:%.*]], splat (i32 1)
218; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
219; CHECK-NEXT:    ret <2 x i32> [[CONV]]
220;
221  %shl = shl <2 x i32> <i32 4, i32 4>, %0
222  %and = and <2 x i32> %shl, <i32 15, i32 15>
223  %cmp = icmp eq <2 x i32> %and, <i32 0, i32 0>
224  %conv = zext <2 x i1> %cmp to <2 x i32>
225  ret <2 x i32> %conv
226}
227
;; ne variant: (2 << X) & 15 != 0 folds to X u< 3.
228define i32 @icmp_ne_and_pow2_minus1_shl_pow2(i32 %0) {
229; CHECK-LABEL: @icmp_ne_and_pow2_minus1_shl_pow2(
230; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[TMP0:%.*]], 3
231; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
232; CHECK-NEXT:    ret i32 [[CONV]]
233;
234  %shl = shl i32 2, %0
235  %and = and i32 %shl, 15
236  %cmp = icmp ne i32 %and, 0
237  %conv = zext i1 %cmp to i32
238  ret i32 %conv
239}
240
241define <2 x i32> @icmp_ne_and_pow2_minus1_shl_pow2_vec(<2 x i32> %0) {
242; CHECK-LABEL: @icmp_ne_and_pow2_minus1_shl_pow2_vec(
243; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[TMP0:%.*]], splat (i32 2)
244; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
245; CHECK-NEXT:    ret <2 x i32> [[CONV]]
246;
247  %shl = shl <2 x i32> <i32 4, i32 4>, %0
248  %and = and <2 x i32> %shl, <i32 15, i32 15>
249  %cmp = icmp ne <2 x i32> %and, <i32 0, i32 0>
250  %conv = zext <2 x i1> %cmp to <2 x i32>
251  ret <2 x i32> %conv
252}
253
;; Negative: shifted constant 3 is not a power of 2, so no fold happens —
;; the output IR is unchanged.
254define i32 @icmp_eq_and_pow2_minus1_shl1_negative1(i32 %0) {
255; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl1_negative1(
256; CHECK-NEXT:    [[SHL:%.*]] = shl i32 3, [[TMP0:%.*]]
257; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], 15
258; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
259; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
260; CHECK-NEXT:    ret i32 [[CONV]]
261;
262  %shl = shl i32 3, %0
263  %and = and i32 %shl, 15
264  %cmp = icmp eq i32 %and, 0
265  %conv = zext i1 %cmp to i32
266  ret i32 %conv
267}
268
;; Negative: 32 << X never overlaps the low mask 15, so the and is always 0
;; and the whole expression folds to the constant 1.
269define i32 @icmp_eq_and_pow2_minus1_shl1_negative2(i32 %0) {
270; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl1_negative2(
271; CHECK-NEXT:    ret i32 1
272;
273  %shl = shl i32 32, %0
274  %and = and i32 %shl, 15
275  %cmp = icmp eq i32 %and, 0
276  %conv = zext i1 %cmp to i32
277  ret i32 %conv
278}
279
280
;; lshr mirror of the shl tests: (8 >> X) & 1 == 0 folds to X != 3.
281define i32 @icmp_eq_and1_lshr_pow2(i32 %0) {
282; CHECK-LABEL: @icmp_eq_and1_lshr_pow2(
283; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP0:%.*]], 3
284; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[TMP2]] to i32
285; CHECK-NEXT:    ret i32 [[CONV]]
286;
287  %lshr = lshr i32 8, %0
288  %and  = and i32 %lshr, 1
289  %cmp  = icmp eq i32 %and, 0
290  %conv = zext i1 %cmp to i32
291  ret i32 %conv
292}
293
294define <2 x i32> @icmp_eq_and1_lshr_pow2_vec(<2 x i32> %0) {
295; CHECK-LABEL: @icmp_eq_and1_lshr_pow2_vec(
296; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <2 x i32> [[TMP0:%.*]], splat (i32 3)
297; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
298; CHECK-NEXT:    ret <2 x i32> [[CONV]]
299;
300  %lshr = lshr <2 x i32> <i32 8, i32 8>, %0
301  %and  = and <2 x i32> %lshr, <i32 1, i32 1>
302  %cmp  = icmp eq <2 x i32> %and, <i32 0, i32 0>
303  %conv = zext <2 x i1> %cmp to <2 x i32>
304  ret <2 x i32> %conv
305}
306
;; NOTE(review): despite the "ne" in the name, this body uses `icmp eq`,
;; making it an exact duplicate of @icmp_eq_and1_lshr_pow2 above (the CHECK
;; lines match the eq fold). Possibly a copy-paste — confirm against intent.
307define i32 @icmp_ne_and1_lshr_pow2(i32 %0) {
308; CHECK-LABEL: @icmp_ne_and1_lshr_pow2(
309; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP0:%.*]], 3
310; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[TMP2]] to i32
311; CHECK-NEXT:    ret i32 [[CONV]]
312;
313  %lshr = lshr i32 8, %0
314  %and  = and i32 %lshr, 1
315  %cmp  = icmp eq i32 %and, 0
316  %conv = zext i1 %cmp to i32
317  ret i32 %conv
318}
319
;; NOTE(review): the mask here is 4, not 1 as the "and1" name suggests;
;; (8 >> X) & 4 != 0 folds to X == 1, matching the CHECK lines.
320define <2 x i32> @icmp_ne_and1_lshr_pow2_vec(<2 x i32> %0) {
321; CHECK-LABEL: @icmp_ne_and1_lshr_pow2_vec(
322; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP0:%.*]], splat (i32 1)
323; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
324; CHECK-NEXT:    ret <2 x i32> [[CONV]]
325;
326  %lshr = lshr <2 x i32> <i32 8, i32 8>, %0
327  %and  = and <2 x i32> %lshr, <i32 4, i32 4>
328  %cmp  = icmp ne <2 x i32> %and, <i32 0, i32 0>
329  %conv = zext <2 x i1> %cmp to <2 x i32>
330  ret <2 x i32> %conv
331}
332
;; (8 >> X) & 4 == 0 folds to X != 1.
333define i32 @icmp_eq_and_pow2_lshr_pow2(i32 %0) {
334; CHECK-LABEL: @icmp_eq_and_pow2_lshr_pow2(
335; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP0:%.*]], 1
336; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[TMP2]] to i32
337; CHECK-NEXT:    ret i32 [[CONV]]
338;
339  %lshr = lshr i32 8, %0
340  %and  = and i32 %lshr, 4
341  %cmp  = icmp eq i32 %and, 0
342  %conv = zext i1 %cmp to i32
343  ret i32 %conv
344}
345
;; 4 >> X can never reach bit 3, so (4 >> X) & 8 is always 0 and the
;; expression folds to the constant 1.
346define i32 @icmp_eq_and_pow2_lshr_pow2_case2(i32 %0) {
347; CHECK-LABEL: @icmp_eq_and_pow2_lshr_pow2_case2(
348; CHECK-NEXT:    ret i32 1
349;
350  %lshr = lshr i32 4, %0
351  %and  = and i32 %lshr, 8
352  %cmp  = icmp eq i32 %and, 0
353  %conv = zext i1 %cmp to i32
354  ret i32 %conv
355}
356
357define <2 x i32> @icmp_eq_and_pow2_lshr_pow2_vec(<2 x i32> %0) {
358; CHECK-LABEL: @icmp_eq_and_pow2_lshr_pow2_vec(
359; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <2 x i32> [[TMP0:%.*]], splat (i32 1)
360; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
361; CHECK-NEXT:    ret <2 x i32> [[CONV]]
362;
363  %lshr = lshr <2 x i32> <i32 8, i32 8>, %0
364  %and  = and <2 x i32> %lshr, <i32 4, i32 4>
365  %cmp  = icmp eq <2 x i32> %and, <i32 0, i32 0>
366  %conv = zext <2 x i1> %cmp to <2 x i32>
367  ret <2 x i32> %conv
368}
369
;; NOTE(review): this and the case2 test below say "ne" in the name but use
;; `icmp eq` in the body, duplicating the eq tests above (the CHECK lines
;; match the eq fold). Only the _vec variant actually tests `icmp ne`.
370define i32 @icmp_ne_and_pow2_lshr_pow2(i32 %0) {
371; CHECK-LABEL: @icmp_ne_and_pow2_lshr_pow2(
372; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP0:%.*]], 1
373; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[TMP2]] to i32
374; CHECK-NEXT:    ret i32 [[CONV]]
375;
376  %lshr = lshr i32 8, %0
377  %and  = and i32 %lshr, 4
378  %cmp  = icmp eq i32 %and, 0
379  %conv = zext i1 %cmp to i32
380  ret i32 %conv
381}
382
383define i32 @icmp_ne_and_pow2_lshr_pow2_case2(i32 %0) {
384; CHECK-LABEL: @icmp_ne_and_pow2_lshr_pow2_case2(
385; CHECK-NEXT:    ret i32 1
386;
387  %lshr = lshr i32 4, %0
388  %and  = and i32 %lshr, 8
389  %cmp  = icmp eq i32 %and, 0
390  %conv = zext i1 %cmp to i32
391  ret i32 %conv
392}
393
;; (8 >> X) & 4 != 0 folds to X == 1 (vector form).
394define <2 x i32> @icmp_ne_and_pow2_lshr_pow2_vec(<2 x i32> %0) {
395; CHECK-LABEL: @icmp_ne_and_pow2_lshr_pow2_vec(
396; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP0:%.*]], splat (i32 1)
397; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
398; CHECK-NEXT:    ret <2 x i32> [[CONV]]
399;
400  %lshr = lshr <2 x i32> <i32 8, i32 8>, %0
401  %and  = and <2 x i32> %lshr, <i32 4, i32 4>
402  %cmp  = icmp ne <2 x i32> %and, <i32 0, i32 0>
403  %conv = zext <2 x i1> %cmp to <2 x i32>
404  ret <2 x i32> %conv
405}
406
;; Shifted constant 7 = 2^3 - 1: (7 >> X) & 1 == 0 folds to X u> 2.
407define i32 @icmp_eq_and1_lshr_pow2_minus_one(i32 %0) {
408; CHECK-LABEL: @icmp_eq_and1_lshr_pow2_minus_one(
409; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[TMP0:%.*]], 2
410; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
411; CHECK-NEXT:    ret i32 [[CONV]]
412;
413  %lshr = lshr i32 7, %0
414  %and  = and i32 %lshr, 1
415  %cmp  = icmp eq i32 %and, 0
416  %conv = zext i1 %cmp to i32
417  ret i32 %conv
418}
419
;; Negative: mask 3 has more than one bit set, so no fold applies and the
;; IR is left unchanged.
420define i32 @icmp_eq_and1_lshr_pow2_negative2(i32 %0) {
421; CHECK-LABEL: @icmp_eq_and1_lshr_pow2_negative2(
422; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 8, [[TMP0:%.*]]
423; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], 3
424; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
425; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
426; CHECK-NEXT:    ret i32 [[CONV]]
427;
428  %lshr = lshr i32 8, %0
429  %and  = and i32 %lshr, 3
430  %cmp  = icmp eq i32 %and, 0
431  %conv = zext i1 %cmp to i32
432  ret i32 %conv
433}
434
;; (X & P) == P with P a known power of 2 (1 << Y) folds to (X & P) != 0.
435define i1 @eq_and_shl_one(i8 %x, i8 %y) {
436; CHECK-LABEL: @eq_and_shl_one(
437; CHECK-NEXT:    [[POW2:%.*]] = shl nuw i8 1, [[Y:%.*]]
438; CHECK-NEXT:    [[AND:%.*]] = and i8 [[POW2]], [[X:%.*]]
439; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[AND]], 0
440; CHECK-NEXT:    ret i1 [[CMP]]
441;
442  %pow2 = shl i8 1, %y
443  %and = and i8 %pow2, %x
444  %cmp = icmp eq i8 %and, %pow2
445  ret i1 %cmp
446}
447
;; Commuted ne form: P != (X & P) folds to (X & P) == 0; also checks the
;; fold survives a poison vector lane.
448define <2 x i1> @ne_and_shl_one_commute(<2 x i8> %x, <2 x i8> %y) {
449; CHECK-LABEL: @ne_and_shl_one_commute(
450; CHECK-NEXT:    [[POW2:%.*]] = shl nuw <2 x i8> <i8 1, i8 poison>, [[Y:%.*]]
451; CHECK-NEXT:    [[AND:%.*]] = and <2 x i8> [[POW2]], [[X:%.*]]
452; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[AND]], zeroinitializer
453; CHECK-NEXT:    ret <2 x i1> [[CMP]]
454;
455  %pow2 = shl <2 x i8> <i8 1, i8 poison>, %y
456  %and = and <2 x i8> %pow2, %x
457  %cmp = icmp ne <2 x i8> %pow2, %and
458  ret <2 x i1> %cmp
459}
460
;; -128 >> Y is also a single known bit (high bit shifted right), so the
;; same (X & P) != P  ==>  (X & P) == 0 fold applies.
461define i1 @ne_and_lshr_minval(i8 %px, i8 %y) {
462; CHECK-LABEL: @ne_and_lshr_minval(
463; CHECK-NEXT:    [[X:%.*]] = mul i8 [[PX:%.*]], [[PX]]
464; CHECK-NEXT:    [[POW2:%.*]] = lshr exact i8 -128, [[Y:%.*]]
465; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[POW2]]
466; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[AND]], 0
467; CHECK-NEXT:    ret i1 [[CMP]]
468;
469  %x = mul i8 %px, %px ; thwart complexity-based canonicalization
470  %pow2 = lshr i8 -128, %y
471  %and = and i8 %x, %pow2
472  %cmp = icmp ne i8 %and, %pow2
473  ret i1 %cmp
474}
475
;; Multi-use of the power-of-2 and the and still allows the cmp fold.
476define i1 @eq_and_lshr_minval_commute(i8 %px, i8 %y) {
477; CHECK-LABEL: @eq_and_lshr_minval_commute(
478; CHECK-NEXT:    [[X:%.*]] = mul i8 [[PX:%.*]], [[PX]]
479; CHECK-NEXT:    [[POW2:%.*]] = lshr exact i8 -128, [[Y:%.*]]
480; CHECK-NEXT:    call void @use(i8 [[POW2]])
481; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[POW2]]
482; CHECK-NEXT:    call void @use(i8 [[AND]])
483; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[AND]], 0
484; CHECK-NEXT:    ret i1 [[CMP]]
485;
486  %x = mul i8 %px, %px ; thwart complexity-based canonicalization
487  %pow2 = lshr i8 -128, %y
488  call void @use(i8 %pow2)
489  %and = and i8 %x, %pow2
490  %cmp = icmp eq i8 %pow2, %and
491  ret i1 %cmp
492}
493
494; Negative test: May be power of two or zero.
495define i1 @eq_and_shl_two(i8 %x, i8 %y) {
496; CHECK-LABEL: @eq_and_shl_two(
497; CHECK-NEXT:    [[POW2_OR_ZERO:%.*]] = shl i8 2, [[Y:%.*]]
498; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X:%.*]], [[POW2_OR_ZERO]]
499; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[AND]], [[POW2_OR_ZERO]]
500; CHECK-NEXT:    ret i1 [[CMP]]
501;
502  %pow2_or_zero = shl i8 2, %y
503  %and = and i8 %x, %pow2_or_zero
504  %cmp = icmp eq i8 %and, %pow2_or_zero
505  ret i1 %cmp
506}
507
508; Negative test: Wrong predicate.
509define i1 @slt_and_shl_one(i8 %x, i8 %y) {
510; CHECK-LABEL: @slt_and_shl_one(
511; CHECK-NEXT:    [[POW2:%.*]] = shl nuw i8 1, [[Y:%.*]]
512; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X:%.*]], [[POW2]]
513; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[AND]], [[POW2]]
514; CHECK-NEXT:    ret i1 [[CMP]]
515;
516  %pow2 = shl i8 1, %y
517  %and = and i8 %x, %pow2
518  %cmp = icmp slt i8 %and, %pow2
519  ret i1 %cmp
520}
522
;; ((-1 << X) & Y) == 0 folds to (Y >> X) == 0 — the all-ones shift selects
;; the bits of Y at or above X.
523define i1 @fold_eq_lhs(i8 %x, i8 %y) {
524; CHECK-LABEL: @fold_eq_lhs(
525; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 [[Y:%.*]], [[X:%.*]]
526; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP1]], 0
527; CHECK-NEXT:    ret i1 [[R]]
528;
529  %shl = shl i8 -1, %x
530  %and = and i8 %shl, %y
531  %r = icmp eq i8 %and, 0
532  ret i1 %r
533}
534
;; Negative: comparing against a nonzero constant blocks the fold.
535define i1 @fold_eq_lhs_fail_eq_nonzero(i8 %x, i8 %y) {
536; CHECK-LABEL: @fold_eq_lhs_fail_eq_nonzero(
537; CHECK-NEXT:    [[SHL:%.*]] = shl nsw i8 -1, [[X:%.*]]
538; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SHL]], [[Y:%.*]]
539; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], 1
540; CHECK-NEXT:    ret i1 [[R]]
541;
542  %shl = shl i8 -1, %x
543  %and = and i8 %shl, %y
544  %r = icmp eq i8 %and, 1
545  ret i1 %r
546}
547
;; Negative: an extra use of the shl blocks the fold (it would not reduce
;; the instruction count).
548define i1 @fold_eq_lhs_fail_multiuse_shl(i8 %x, i8 %y) {
549; CHECK-LABEL: @fold_eq_lhs_fail_multiuse_shl(
550; CHECK-NEXT:    [[SHL:%.*]] = shl nsw i8 -1, [[X:%.*]]
551; CHECK-NEXT:    call void @use(i8 [[SHL]])
552; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SHL]], [[Y:%.*]]
553; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], 0
554; CHECK-NEXT:    ret i1 [[R]]
555;
556  %shl = shl i8 -1, %x
557  call void @use(i8 %shl)
558  %and = and i8 %shl, %y
559  %r = icmp eq i8 %and, 0
560  ret i1 %r
561}
562
;; Same (Y & (-1 << X)) != 0 ==> (Y >> X) != 0 fold, with the shl on the
;; RHS of the and.
563define i1 @fold_ne_rhs(i8 %x, i8 %yy) {
564; CHECK-LABEL: @fold_ne_rhs(
565; CHECK-NEXT:    [[Y:%.*]] = xor i8 [[YY:%.*]], 123
566; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 [[Y]], [[X:%.*]]
567; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[TMP1]], 0
568; CHECK-NEXT:    ret i1 [[R]]
569;
570  %y = xor i8 %yy, 123
571  %shl = shl i8 -1, %x
572  %and = and i8 %y, %shl
573  %r = icmp ne i8 %and, 0
574  ret i1 %r
575}
576
;; Negative: an extra use of the and blocks the fold.
577define i1 @fold_ne_rhs_fail_multiuse_and(i8 %x, i8 %yy) {
578; CHECK-LABEL: @fold_ne_rhs_fail_multiuse_and(
579; CHECK-NEXT:    [[Y:%.*]] = xor i8 [[YY:%.*]], 123
580; CHECK-NEXT:    [[SHL:%.*]] = shl nsw i8 -1, [[X:%.*]]
581; CHECK-NEXT:    [[AND:%.*]] = and i8 [[Y]], [[SHL]]
582; CHECK-NEXT:    call void @use(i8 [[AND]])
583; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], 0
584; CHECK-NEXT:    ret i1 [[R]]
585;
586  %y = xor i8 %yy, 123
587  %shl = shl i8 -1, %x
588  %and = and i8 %y, %shl
589  call void @use(i8 %and)
590  %r = icmp ne i8 %and, 0
591  ret i1 %r
592}
593
;; Negative: shifting -2 instead of -1 leaves a hole in the mask, so the
;; lshr rewrite would not be equivalent.
594define i1 @fold_ne_rhs_fail_shift_not_1s(i8 %x, i8 %yy) {
595; CHECK-LABEL: @fold_ne_rhs_fail_shift_not_1s(
596; CHECK-NEXT:    [[Y:%.*]] = xor i8 [[YY:%.*]], 122
597; CHECK-NEXT:    [[SHL:%.*]] = shl i8 -2, [[X:%.*]]
598; CHECK-NEXT:    [[AND:%.*]] = and i8 [[Y]], [[SHL]]
599; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], 0
600; CHECK-NEXT:    ret i1 [[R]]
601;
602  %y = xor i8 %yy, 123
603  %shl = shl i8 -2, %x
604  %and = and i8 %y, %shl
605  %r = icmp ne i8 %and, 0
606  ret i1 %r
607}
608
;; ((A >> B) & 1) != 0 is canonicalized to (A & (1 << B)) != 0 — the bit
;; test moves from the shifted value to a shifted mask.
609define i1 @test_shr_and_1_ne_0(i32 %a, i32 %b) {
610; CHECK-LABEL: @test_shr_and_1_ne_0(
611; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
612; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[A:%.*]], [[TMP1]]
613; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP2]], 0
614; CHECK-NEXT:    ret i1 [[CMP]]
615;
616  %shr = lshr i32 %a, %b
617  %and = and i32 %shr, 1
618  %cmp = icmp ne i32 %and, 0
619  ret i1 %cmp
620}
621
;; Same fold; the `samesign` flag on the icmp is dropped in the output.
622define i1 @test_shr_and_1_ne_0_samesign(i32 %a, i32 %b) {
623; CHECK-LABEL: @test_shr_and_1_ne_0_samesign(
624; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
625; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[A:%.*]], [[TMP1]]
626; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP2]], 0
627; CHECK-NEXT:    ret i1 [[CMP]]
628;
629  %shr = lshr i32 %a, %b
630  %and = and i32 %shr, 1
631  %cmp = icmp samesign ne i32 %and, 0
632  ret i1 %cmp
633}
634
;; Fold also applies when the shifted value is a constant (42).
635define i1 @test_const_shr_and_1_ne_0(i32 %b) {
636; CHECK-LABEL: @test_const_shr_and_1_ne_0(
637; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
638; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 42
639; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP2]], 0
640; CHECK-NEXT:    ret i1 [[CMP]]
641;
642  %shr = lshr i32 42, %b
643  %and = and i32 %shr, 1
644  %cmp = icmp ne i32 %and, 0
645  ret i1 %cmp
646}
647
;; eq-predicate counterpart of the constant-LHS fold.
648define i1 @test_not_const_shr_and_1_ne_0(i32 %b) {
649; CHECK-LABEL: @test_not_const_shr_and_1_ne_0(
650; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
651; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 42
652; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP2]], 0
653; CHECK-NEXT:    ret i1 [[CMP]]
654;
655  %shr = lshr i32 42, %b
656  %and = and i32 %shr, 1
657  %cmp = icmp eq i32 %and, 0
658  ret i1 %cmp
659}
660
;; An `exact` flag on the lshr does not block the fold.
661define i1 @test_const_shr_exact_and_1_ne_0(i32 %b) {
662; CHECK-LABEL: @test_const_shr_exact_and_1_ne_0(
663; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
664; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 42
665; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP2]], 0
666; CHECK-NEXT:    ret i1 [[CMP]]
667;
668  %shr = lshr exact i32 42, %b
669  %and = and i32 %shr, 1
670  %cmp = icmp ne i32 %and, 0
671  ret i1 %cmp
672}
673
;; Negative: the mask must be exactly 1 for the fold; mask 2 stays as-is.
674define i1 @test_const_shr_and_2_ne_0_negative(i32 %b) {
675; CHECK-LABEL: @test_const_shr_and_2_ne_0_negative(
676; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 42, [[B:%.*]]
677; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHR]], 2
678; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
679; CHECK-NEXT:    ret i1 [[CMP]]
680;
681  %shr = lshr i32 42, %b
682  %and = and i32 %shr, 2
683  %cmp = icmp eq i32 %and, 0
684  ret i1 %cmp
685}
686
;; Negative for the shl-mask rewrite on vectors; the and+icmp is instead
;; turned into a trunc-to-i1 of the shifted value.
687define <8 x i1> @test_const_shr_and_1_ne_0_v8i8_splat_negative(<8 x i8> %b) {
688; CHECK-LABEL: @test_const_shr_and_1_ne_0_v8i8_splat_negative(
689; CHECK-NEXT:    [[SHR:%.*]] = lshr <8 x i8> splat (i8 42), [[B:%.*]]
690; CHECK-NEXT:    [[CMP:%.*]] = trunc <8 x i8> [[SHR]] to <8 x i1>
691; CHECK-NEXT:    ret <8 x i1> [[CMP]]
692;
693  %shr = lshr <8 x i8> <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>, %b
694  %and = and <8 x i8> %shr, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
695  %cmp = icmp ne <8 x i8> %and, zeroinitializer
696  ret <8 x i1> %cmp
697}
698
699define <8 x i1> @test_const_shr_and_1_ne_0_v8i8_nonsplat_negative(<8 x i8> %b) {
700; CHECK-LABEL: @test_const_shr_and_1_ne_0_v8i8_nonsplat_negative(
701; CHECK-NEXT:    [[SHR:%.*]] = lshr <8 x i8> <i8 42, i8 43, i8 44, i8 45, i8 46, i8 47, i8 48, i8 49>, [[B:%.*]]
702; CHECK-NEXT:    [[CMP:%.*]] = trunc <8 x i8> [[SHR]] to <8 x i1>
703; CHECK-NEXT:    ret <8 x i1> [[CMP]]
704;
705  %shr = lshr <8 x i8> <i8 42, i8 43, i8 44, i8 45, i8 46, i8 47, i8 48, i8 49>, %b
706  %and = and <8 x i8> %shr, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
707  %cmp = icmp ne <8 x i8> %and, zeroinitializer
708  ret <8 x i1> %cmp
709}
710
;; i1 case: 1 >> %b is 1 for any valid %b, so the result constant-folds
;; to true before the rewrite is relevant.
711define i1 @test_const_shr_and_1_ne_0_i1_negative(i1 %b) {
712; CHECK-LABEL: @test_const_shr_and_1_ne_0_i1_negative(
713; CHECK-NEXT:    ret i1 true
714;
715  %shr = lshr i1 1, %b
716  %and = and i1 %shr, 1
717  %cmp = icmp ne i1 %and, 0
718  ret i1 %cmp
719}
720
;; Negative: extra use of the lshr blocks the rewrite.
721define i1 @test_const_shr_and_1_ne_0_multi_use_lshr_negative(i32 %b) {
722; CHECK-LABEL: @test_const_shr_and_1_ne_0_multi_use_lshr_negative(
723; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 42, [[B:%.*]]
724; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHR]], 1
725; CHECK-NEXT:    [[CMP1:%.*]] = icmp ne i32 [[AND]], 0
726; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[B]], [[SHR]]
727; CHECK-NEXT:    [[RET:%.*]] = and i1 [[CMP1]], [[CMP2]]
728; CHECK-NEXT:    ret i1 [[RET]]
729;
730  %shr = lshr i32 42, %b
731  %and = and i32 %shr, 1
732  %cmp1 = icmp ne i32 %and, 0
733  %cmp2 = icmp eq i32 %b, %shr
734  %ret = and i1 %cmp1, %cmp2
735  ret i1 %ret
736}
737
;; Negative: extra use of the and blocks the rewrite.
738define i1 @test_const_shr_and_1_ne_0_multi_use_and_negative(i32 %b) {
739; CHECK-LABEL: @test_const_shr_and_1_ne_0_multi_use_and_negative(
740; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 42, [[B:%.*]]
741; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHR]], 1
742; CHECK-NEXT:    [[CMP1:%.*]] = icmp ne i32 [[AND]], 0
743; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[B]], [[AND]]
744; CHECK-NEXT:    [[RET:%.*]] = and i1 [[CMP1]], [[CMP2]]
745; CHECK-NEXT:    ret i1 [[RET]]
746;
747  %shr = lshr i32 42, %b
748  %and = and i32 %shr, 1
749  %cmp1 = icmp ne i32 %and, 0
750  %cmp2 = icmp eq i32 %b, %and
751  %ret = and i1 %cmp1, %cmp2
752  ret i1 %ret
753}
754
754