; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

declare void @use8(i8)
declare void @use16(i16)
declare void @use32(i32)

; Most tests below should be left with no 'and' instructions; the negative
; and extra-use tests intentionally keep theirs.

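; The (C << x) & 1 folds below follow from a simple bit argument: the low bit
; of C << x equals the low bit of C when x == 0 and is 0 otherwise. So for odd
; C the result is zext(x == 0), and for even C it is always 0.
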
define i32 @test_with_1(i32 %x) {
; CHECK-LABEL: @test_with_1(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    [[AND:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[AND]]
;
  %shl = shl i32 1, %x
  %and = and i32 %shl, 1
  ret i32 %and
}

define i32 @test_with_3(i32 %x) {
; CHECK-LABEL: @test_with_3(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    [[AND:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[AND]]
;
  %shl = shl i32 3, %x
  %and = and i32 %shl, 1
  ret i32 %and
}

define i32 @test_with_5(i32 %x) {
; CHECK-LABEL: @test_with_5(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    [[AND:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[AND]]
;
  %shl = shl i32 5, %x
  %and = and i32 %shl, 1
  ret i32 %and
}

define i32 @test_with_neg_5(i32 %x) {
; CHECK-LABEL: @test_with_neg_5(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    [[AND:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[AND]]
;
  %shl = shl i32 -5, %x
  %and = and i32 %shl, 1
  ret i32 %and
}

define i32 @test_with_even(i32 %x) {
; CHECK-LABEL: @test_with_even(
; CHECK-NEXT:    ret i32 0
;
  %shl = shl i32 4, %x
  %and = and i32 %shl, 1
  ret i32 %and
}

define <2 x i32> @test_vec(<2 x i32> %x) {
; CHECK-LABEL: @test_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <2 x i32> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[AND:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[AND]]
;
  %shl = shl <2 x i32> <i32 5, i32 5>, %x
  %and = and <2 x i32> %shl, <i32 1, i32 1>
  ret <2 x i32> %and
}

define i32 @test_with_neg_even(i32 %x) {
; CHECK-LABEL: @test_with_neg_even(
; CHECK-NEXT:    ret i32 0
;
  %shl = shl i32 -4, %x
  %and = and i32 %shl, 1
  ret i32 %and
}

define i32 @test_with_more_one_use(i32 %x) {
; CHECK-LABEL: @test_with_more_one_use(
; CHECK-NEXT:    [[SHL:%.*]] = shl i32 7, [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], 1
; CHECK-NEXT:    call void @use32(i32 [[SHL]])
; CHECK-NEXT:    ret i32 [[AND]]
;
  %shl = shl i32 7, %x
  %and = and i32 %shl, 1
  call void @use32(i32 %shl)
  ret i32 %and
}

define i32 @test1(i32 %A) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    ret i32 0
;
  %B = and i32 %A, 0
  ret i32 %B
}

define i32 @test2(i32 %A) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    ret i32 [[A:%.*]]
;
  %B = and i32 %A, -1
  ret i32 %B
}

define i1 @test3(i1 %A) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    ret i1 false
;
  %B = and i1 %A, false
  ret i1 %B
}

define i1 @test3_logical(i1 %A) {
; CHECK-LABEL: @test3_logical(
; CHECK-NEXT:    ret i1 false
;
  %B = select i1 %A, i1 false, i1 false
  ret i1 %B
}

define i1 @test4(i1 %A) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    ret i1 [[A:%.*]]
;
  %B = and i1 %A, true
  ret i1 %B
}

define i1 @test4_logical(i1 %A) {
; CHECK-LABEL: @test4_logical(
; CHECK-NEXT:    ret i1 [[A:%.*]]
;
  %B = select i1 %A, i1 true, i1 false
  ret i1 %B
}

define i32 @test5(i32 %A) {
; CHECK-LABEL: @test5(
; CHECK-NEXT:    ret i32 [[A:%.*]]
;
  %B = and i32 %A, %A
  ret i32 %B
}

define i1 @test6(i1 %A) {
; CHECK-LABEL: @test6(
; CHECK-NEXT:    ret i1 [[A:%.*]]
;
  %B = and i1 %A, %A
  ret i1 %B
}

define i1 @test6_logical(i1 %A) {
; CHECK-LABEL: @test6_logical(
; CHECK-NEXT:    ret i1 [[A:%.*]]
;
  %B = select i1 %A, i1 %A, i1 false
  ret i1 %B
}

; A & ~A == 0
define i32 @test7(i32 %A) {
; CHECK-LABEL: @test7(
; CHECK-NEXT:    ret i32 0
;
  %NotA = xor i32 %A, -1
  %B = and i32 %A, %NotA
  ret i32 %B
}

; AND associates
define i8 @test8(i8 %A) {
; CHECK-LABEL: @test8(
; CHECK-NEXT:    ret i8 0
;
  %B = and i8 %A, 3
  %C = and i8 %B, 4
  ret i8 %C
}

; Test of the sign bit - convert to (icmp slt %A, 0)
define i1 @test9(i32 %A) {
; CHECK-LABEL: @test9(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i32 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %B = and i32 %A, -2147483648
  %C = icmp ne i32 %B, 0
  ret i1 %C
}

; Test of the sign bit - convert to (icmp slt %A, 0)
define i1 @test9a(i32 %A) {
; CHECK-LABEL: @test9a(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i32 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %B = and i32 %A, -2147483648
  %C = icmp ne i32 %B, 0
  ret i1 %C
}

define i32 @test10(i32 %A) {
; CHECK-LABEL: @test10(
; CHECK-NEXT:    ret i32 1
;
  %B = and i32 %A, 12
  %C = xor i32 %B, 15
  ; (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
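  ; Here bit 0 of (A & 12) is always 0, the xor with 15 sets it to 1, and the
  ; mask with 1 keeps only that bit, so the whole chain folds to the constant 1.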
  %D = and i32 %C, 1
  ret i32 %D
}

define i32 @test11(i32 %A, ptr %P) {
; CHECK-LABEL: @test11(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], -4
; CHECK-NEXT:    [[C:%.*]] = xor i32 [[TMP1]], 15
; CHECK-NEXT:    store i32 [[C]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 3
;
  %B = or i32 %A, 3
  %C = xor i32 %B, 12
  ; additional use of C
  store i32 %C, ptr %P
  ; %D = and i32 %C, 3 --> 3
  %D = and i32 %C, 3
  ret i32 %D
}

define i1 @test12(i32 %A, i32 %B) {
; CHECK-LABEL: @test12(
; CHECK-NEXT:    [[C1:%.*]] = icmp ult i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i1 [[C1]]
;
  %C1 = icmp ult i32 %A, %B
  %C2 = icmp ule i32 %A, %B
  ; (A < B) & (A <= B) === (A < B)
  %D = and i1 %C1, %C2
  ret i1 %D
}

define i1 @test12_logical(i32 %A, i32 %B) {
; CHECK-LABEL: @test12_logical(
; CHECK-NEXT:    [[C1:%.*]] = icmp ult i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i1 [[C1]]
;
  %C1 = icmp ult i32 %A, %B
  %C2 = icmp ule i32 %A, %B
  ; (A < B) & (A <= B) === (A < B)
  %D = select i1 %C1, i1 %C2, i1 false
  ret i1 %D
}

define i1 @test13(i32 %A, i32 %B) {
; CHECK-LABEL: @test13(
; CHECK-NEXT:    ret i1 false
;
  %C1 = icmp ult i32 %A, %B
  %C2 = icmp ugt i32 %A, %B
  ; (A < B) & (A > B) === false
  %D = and i1 %C1, %C2
  ret i1 %D
}

define i1 @test13_logical(i32 %A, i32 %B) {
; CHECK-LABEL: @test13_logical(
; CHECK-NEXT:    ret i1 false
;
  %C1 = icmp ult i32 %A, %B
  %C2 = icmp ugt i32 %A, %B
  ; (A < B) & (A > B) === false
  %D = select i1 %C1, i1 %C2, i1 false
  ret i1 %D
}

define i1 @test14(i8 %A) {
; CHECK-LABEL: @test14(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %B = and i8 %A, -128
  %C = icmp ne i8 %B, 0
  ret i1 %C
}

define i8 @test15(i8 %A) {
; CHECK-LABEL: @test15(
; CHECK-NEXT:    ret i8 0
;
  %B = lshr i8 %A, 7
  ; Always equals zero
  %C = and i8 %B, 2
  ret i8 %C
}

define i8 @test16(i8 %A) {
; CHECK-LABEL: @test16(
; CHECK-NEXT:    ret i8 0
;
  %B = shl i8 %A, 2
  %C = and i8 %B, 3
  ret i8 %C
}

define i1 @test18(i32 %A) {
; CHECK-LABEL: @test18(
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[A:%.*]], 127
; CHECK-NEXT:    ret i1 [[C]]
;
  %B = and i32 %A, -128
  ;; A u>= 128
  %C = icmp ne i32 %B, 0
  ret i1 %C
}

define <2 x i1> @test18_vec(<2 x i32> %A) {
; CHECK-LABEL: @test18_vec(
; CHECK-NEXT:    [[C:%.*]] = icmp ugt <2 x i32> [[A:%.*]], splat (i32 127)
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %B = and <2 x i32> %A, <i32 -128, i32 -128>
  %C = icmp ne <2 x i32> %B, zeroinitializer
  ret <2 x i1> %C
}

define i1 @test18a(i8 %A) {
; CHECK-LABEL: @test18a(
; CHECK-NEXT:    [[C:%.*]] = icmp ult i8 [[A:%.*]], 2
; CHECK-NEXT:    ret i1 [[C]]
;
  %B = and i8 %A, -2
  %C = icmp eq i8 %B, 0
  ret i1 %C
}

define <2 x i1> @test18a_vec(<2 x i8> %A) {
; CHECK-LABEL: @test18a_vec(
; CHECK-NEXT:    [[C:%.*]] = icmp ult <2 x i8> [[A:%.*]], splat (i8 2)
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %B = and <2 x i8> %A, <i8 -2, i8 -2>
  %C = icmp eq <2 x i8> %B, zeroinitializer
  ret <2 x i1> %C
}

define i32 @test19(i32 %A) {
; CHECK-LABEL: @test19(
; CHECK-NEXT:    [[B:%.*]] = shl i32 [[A:%.*]], 3
; CHECK-NEXT:    ret i32 [[B]]
;
  %B = shl i32 %A, 3
  ;; Clearing a zero bit
  %C = and i32 %B, -2
  ret i32 %C
}

define i8 @test20(i8 %A) {
; CHECK-LABEL: @test20(
; CHECK-NEXT:    [[C:%.*]] = lshr i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i8 [[C]]
;
  %C = lshr i8 %A, 7
  ;; Unneeded
  %D = and i8 %C, 1
  ret i8 %D
}

define i1 @test23(i32 %A) {
; CHECK-LABEL: @test23(
; CHECK-NEXT:    [[D:%.*]] = icmp eq i32 [[A:%.*]], 2
; CHECK-NEXT:    ret i1 [[D]]
;
  %B = icmp sgt i32 %A, 1
  %C = icmp sle i32 %A, 2
  %D = and i1 %B, %C
  ret i1 %D
}

define i1 @test23_logical(i32 %A) {
; CHECK-LABEL: @test23_logical(
; CHECK-NEXT:    [[D:%.*]] = icmp eq i32 [[A:%.*]], 2
; CHECK-NEXT:    ret i1 [[D]]
;
  %B = icmp sgt i32 %A, 1
  %C = icmp sle i32 %A, 2
  %D = select i1 %B, i1 %C, i1 false
  ret i1 %D
}

define <2 x i1> @test23vec(<2 x i32> %A) {
; CHECK-LABEL: @test23vec(
; CHECK-NEXT:    [[D:%.*]] = icmp eq <2 x i32> [[A:%.*]], splat (i32 2)
; CHECK-NEXT:    ret <2 x i1> [[D]]
;
  %B = icmp sgt <2 x i32> %A, <i32 1, i32 1>
  %C = icmp sle <2 x i32> %A, <i32 2, i32 2>
  %D = and <2 x i1> %B, %C
  ret <2 x i1> %D
}

define i1 @test24(i32 %A) {
; CHECK-LABEL: @test24(
; CHECK-NEXT:    [[D:%.*]] = icmp sgt i32 [[A:%.*]], 2
; CHECK-NEXT:    ret i1 [[D]]
;
  %B = icmp sgt i32 %A, 1
  %C = icmp ne i32 %A, 2
  ;; A > 2
  %D = and i1 %B, %C
  ret i1 %D
}

define i1 @test24_logical(i32 %A) {
; CHECK-LABEL: @test24_logical(
; CHECK-NEXT:    [[D:%.*]] = icmp sgt i32 [[A:%.*]], 2
; CHECK-NEXT:    ret i1 [[D]]
;
  %B = icmp sgt i32 %A, 1
  %C = icmp ne i32 %A, 2
  ;; A > 2
  %D = select i1 %B, i1 %C, i1 false
  ret i1 %D
}

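; Range check: (A s>= 50) & (A s< 100) folds to one unsigned compare of the
; shifted range: (A - 50) u< 50.
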
define i1 @test25(i32 %A) {
; CHECK-LABEL: @test25(
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[A:%.*]], -50
; CHECK-NEXT:    [[D:%.*]] = icmp ult i32 [[TMP1]], 50
; CHECK-NEXT:    ret i1 [[D]]
;
  %B = icmp sge i32 %A, 50
  %C = icmp slt i32 %A, 100
  %D = and i1 %B, %C
  ret i1 %D
}

define i1 @test25_logical(i32 %A) {
; CHECK-LABEL: @test25_logical(
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[A:%.*]], -50
; CHECK-NEXT:    [[D:%.*]] = icmp ult i32 [[TMP1]], 50
; CHECK-NEXT:    ret i1 [[D]]
;
  %B = icmp sge i32 %A, 50
  %C = icmp slt i32 %A, 100
  %D = select i1 %B, i1 %C, i1 false
  ret i1 %D
}

define <2 x i1> @test25vec(<2 x i32> %A) {
; CHECK-LABEL: @test25vec(
; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i32> [[A:%.*]], splat (i32 -50)
; CHECK-NEXT:    [[D:%.*]] = icmp ult <2 x i32> [[TMP1]], splat (i32 50)
; CHECK-NEXT:    ret <2 x i1> [[D]]
;
  %B = icmp sge <2 x i32> %A, <i32 50, i32 50>
  %C = icmp slt <2 x i32> %A, <i32 100, i32 100>
  %D = and <2 x i1> %B, %C
  ret <2 x i1> %D
}

define i8 @test27(i8 %A) {
; CHECK-LABEL: @test27(
; CHECK-NEXT:    ret i8 0
;
  %B = and i8 %A, 4
  %C = sub i8 %B, 16
  ;; 0xF0
  %D = and i8 %C, -16
  %E = add i8 %D, 16
  ret i8 %E
}

;; No demand for extra sign bits.

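; When the mask discards every bit where ashr and lshr could differ (all the
; shifted-in sign bits), the ashr can be rewritten as lshr.
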
define i32 @ashr_lowmask(i32 %x) {
; CHECK-LABEL: @ashr_lowmask(
; CHECK-NEXT:    [[A:%.*]] = lshr i32 [[X:%.*]], 24
; CHECK-NEXT:    ret i32 [[A]]
;
  %a = ashr i32 %x, 24
  %r = and i32 %a, 255
  ret i32 %r
}

define i32 @ashr_lowmask_use(i32 %x) {
; CHECK-LABEL: @ashr_lowmask_use(
; CHECK-NEXT:    [[A:%.*]] = ashr i32 [[X:%.*]], 1
; CHECK-NEXT:    call void @use32(i32 [[A]])
; CHECK-NEXT:    [[R:%.*]] = lshr i32 [[X]], 1
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = ashr i32 %x, 1
  call void @use32(i32 %a)
  %r = and i32 %a, 2147483647
  ret i32 %r
}

define <2 x i8> @ashr_lowmask_use_splat(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @ashr_lowmask_use_splat(
; CHECK-NEXT:    [[A:%.*]] = ashr <2 x i8> [[X:%.*]], splat (i8 7)
; CHECK-NEXT:    store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT:    [[R:%.*]] = lshr <2 x i8> [[X]], splat (i8 7)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a = ashr <2 x i8> %x, <i8 7, i8 7>
  store <2 x i8> %a, ptr %p
  %r = and <2 x i8> %a, <i8 1, i8 1>
  ret <2 x i8> %r
}

; negative test - must keep all low bits

define i32 @ashr_not_lowmask1_use(i32 %x) {
; CHECK-LABEL: @ashr_not_lowmask1_use(
; CHECK-NEXT:    [[A:%.*]] = ashr i32 [[X:%.*]], 24
; CHECK-NEXT:    call void @use32(i32 [[A]])
; CHECK-NEXT:    [[R:%.*]] = and i32 [[A]], 254
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = ashr i32 %x, 24
  call void @use32(i32 %a)
  %r = and i32 %a, 254
  ret i32 %r
}

; negative test - must keep all low bits

define i32 @ashr_not_lowmask2_use(i32 %x) {
; CHECK-LABEL: @ashr_not_lowmask2_use(
; CHECK-NEXT:    [[A:%.*]] = ashr i32 [[X:%.*]], 24
; CHECK-NEXT:    call void @use32(i32 [[A]])
; CHECK-NEXT:    [[R:%.*]] = and i32 [[A]], 127
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = ashr i32 %x, 24
  call void @use32(i32 %a)
  %r = and i32 %a, 127
  ret i32 %r
}

; negative test - must keep only low bits

define i32 @ashr_not_lowmask3_use(i32 %x) {
; CHECK-LABEL: @ashr_not_lowmask3_use(
; CHECK-NEXT:    [[A:%.*]] = ashr i32 [[X:%.*]], 24
; CHECK-NEXT:    call void @use32(i32 [[A]])
; CHECK-NEXT:    [[R:%.*]] = and i32 [[A]], 511
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = ashr i32 %x, 24
  call void @use32(i32 %a)
  %r = and i32 %a, 511
  ret i32 %r
}

define i32 @test29(i8 %X) {
; CHECK-LABEL: @test29(
; CHECK-NEXT:    [[Y:%.*]] = zext i8 [[X:%.*]] to i32
; CHECK-NEXT:    ret i32 [[Y]]
;
  %Y = zext i8 %X to i32
  ;; Zero extend makes this unneeded.
  %Z = and i32 %Y, 255
  ret i32 %Z
}

define i32 @test30(i1 %X) {
; CHECK-LABEL: @test30(
; CHECK-NEXT:    [[Y:%.*]] = zext i1 [[X:%.*]] to i32
; CHECK-NEXT:    ret i32 [[Y]]
;
  %Y = zext i1 %X to i32
  %Z = and i32 %Y, 1
  ret i32 %Z
}

define i32 @test31(i1 %X) {
; CHECK-LABEL: @test31(
; CHECK-NEXT:    [[Z:%.*]] = select i1 [[X:%.*]], i32 16, i32 0
; CHECK-NEXT:    ret i32 [[Z]]
;
  %Y = zext i1 %X to i32
  %Z = shl i32 %Y, 4
  %A = and i32 %Z, 16
  ret i32 %A
}

; Demanded bit analysis allows us to eliminate the add.

define <2 x i32> @and_demanded_bits_splat_vec(<2 x i32> %x) {
; CHECK-LABEL: @and_demanded_bits_splat_vec(
; CHECK-NEXT:    [[Z:%.*]] = and <2 x i32> [[X:%.*]], splat (i32 7)
; CHECK-NEXT:    ret <2 x i32> [[Z]]
;
  %y = add <2 x i32> %x, <i32 8, i32 8>
  %z = and <2 x i32> %y, <i32 7, i32 7>
  ret <2 x i32> %z
}

; zext (x >> 8) has all zeros in the high 24 bits:  0x000000xx
; (y | 255) has all ones in the low 8 bits: 0xyyyyyyff
; 'and' of those is all known bits - it's just 'z'.

define i32 @and_zext_demanded(i16 %x, i32 %y) {
; CHECK-LABEL: @and_zext_demanded(
; CHECK-NEXT:    [[S:%.*]] = lshr i16 [[X:%.*]], 8
; CHECK-NEXT:    [[Z:%.*]] = zext nneg i16 [[S]] to i32
; CHECK-NEXT:    ret i32 [[Z]]
;
  %s = lshr i16 %x, 8
  %z = zext i16 %s to i32
  %o = or i32 %y, 255
  %a = and i32 %o, %z
  ret i32 %a
}

define i32 @test32(i32 %In) {
; CHECK-LABEL: @test32(
; CHECK-NEXT:    ret i32 0
;
  %Y = and i32 %In, 16
  %Z = lshr i32 %Y, 2
  %A = and i32 %Z, 1
  ret i32 %A
}

;; Code corresponding to one-bit bitfield ^1.
634;; Code corresponding to one-bit bitfield ^1.
635define i32 @test33(i32 %b) {
636; CHECK-LABEL: @test33(
637; CHECK-NEXT:    [[T13:%.*]] = xor i32 [[B:%.*]], 1
638; CHECK-NEXT:    ret i32 [[T13]]
639;
640  %t4.mask = and i32 %b, 1
641  %t10 = xor i32 %t4.mask, 1
642  %t12 = and i32 %b, -2
643  %t13 = or i32 %t12, %t10
644  ret i32 %t13
645}
646
647define i32 @test33b(i32 %b) {
648; CHECK-LABEL: @test33b(
649; CHECK-NEXT:    [[T13:%.*]] = xor i32 [[B:%.*]], 1
650; CHECK-NEXT:    ret i32 [[T13]]
651;
652  %t4.mask = and i32 %b, 1
653  %t10 = xor i32 %t4.mask, 1
654  %t12 = and i32 %b, -2
655  %t13 = or i32 %t10, %t12
656  ret i32 %t13
657}
658
659define <2 x i32> @test33vec(<2 x i32> %b) {
660; CHECK-LABEL: @test33vec(
661; CHECK-NEXT:    [[T13:%.*]] = xor <2 x i32> [[B:%.*]], splat (i32 1)
662; CHECK-NEXT:    ret <2 x i32> [[T13]]
663;
664  %t4.mask = and <2 x i32> %b, <i32 1, i32 1>
665  %t10 = xor <2 x i32> %t4.mask, <i32 1, i32 1>
666  %t12 = and <2 x i32> %b, <i32 -2, i32 -2>
667  %t13 = or <2 x i32> %t12, %t10
668  ret <2 x i32> %t13
669}
670
671define <2 x i32> @test33vecb(<2 x i32> %b) {
672; CHECK-LABEL: @test33vecb(
673; CHECK-NEXT:    [[T13:%.*]] = xor <2 x i32> [[B:%.*]], splat (i32 1)
674; CHECK-NEXT:    ret <2 x i32> [[T13]]
675;
676  %t4.mask = and <2 x i32> %b, <i32 1, i32 1>
677  %t10 = xor <2 x i32> %t4.mask, <i32 1, i32 1>
678  %t12 = and <2 x i32> %b, <i32 -2, i32 -2>
679  %t13 = or <2 x i32> %t10, %t12
680  ret <2 x i32> %t13
681}
682
683define i32 @test34(i32 %A, i32 %B) {
684; CHECK-LABEL: @test34(
685; CHECK-NEXT:    ret i32 [[B:%.*]]
686;
687  %t2 = or i32 %B, %A
688  %t4 = and i32 %t2, %B
689  ret i32 %t4
690}
691
692; FIXME: This test should only need -passes=instsimplify (ValueTracking / computeKnownBits), not -passes=instcombine.
693
694define <2 x i32> @PR24942(<2 x i32> %x) {
695; CHECK-LABEL: @PR24942(
696; CHECK-NEXT:    ret <2 x i32> zeroinitializer
697;
698  %lshr = lshr <2 x i32> %x, <i32 31, i32 31>
699  %and = and <2 x i32> %lshr, <i32 2, i32 2>
700  ret <2 x i32> %and
701}
702
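; When the mask demands only bits of the narrow source value, the arithmetic
; can be performed in the narrow type and the zext sunk below it.
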
define i64 @test35(i32 %X) {
; CHECK-LABEL: @test35(
; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 0, [[X:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 240
; CHECK-NEXT:    [[RES:%.*]] = zext nneg i32 [[TMP2]] to i64
; CHECK-NEXT:    ret i64 [[RES]]
;
  %zext = zext i32 %X to i64
  %zsub = sub i64 0, %zext
  %res = and i64 %zsub, 240
  ret i64 %res
}

define <2 x i64> @test35_uniform(<2 x i32> %X) {
; CHECK-LABEL: @test35_uniform(
; CHECK-NEXT:    [[TMP1:%.*]] = sub <2 x i32> zeroinitializer, [[X:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], splat (i32 240)
; CHECK-NEXT:    [[RES:%.*]] = zext nneg <2 x i32> [[TMP2]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[RES]]
;
  %zext = zext <2 x i32> %X to <2 x i64>
  %zsub = sub <2 x i64> zeroinitializer, %zext
  %res = and <2 x i64> %zsub, <i64 240, i64 240>
  ret <2 x i64> %res
}

define i64 @test36(i32 %X) {
; CHECK-LABEL: @test36(
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[X:%.*]], 7
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 240
; CHECK-NEXT:    [[RES:%.*]] = zext nneg i32 [[TMP2]] to i64
; CHECK-NEXT:    ret i64 [[RES]]
;
  %zext = zext i32 %X to i64
  %zsub = add i64 %zext, 7
  %res = and i64 %zsub, 240
  ret i64 %res
}

define <2 x i64> @test36_uniform(<2 x i32> %X) {
; CHECK-LABEL: @test36_uniform(
; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i32> [[X:%.*]], splat (i32 7)
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], splat (i32 240)
; CHECK-NEXT:    [[RES:%.*]] = zext nneg <2 x i32> [[TMP2]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[RES]]
;
  %zext = zext <2 x i32> %X to <2 x i64>
  %zsub = add <2 x i64> %zext, <i64 7, i64 7>
  %res = and <2 x i64> %zsub, <i64 240, i64 240>
  ret <2 x i64> %res
}

define <2 x i64> @test36_poison(<2 x i32> %X) {
; CHECK-LABEL: @test36_poison(
; CHECK-NEXT:    [[ZEXT:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64>
; CHECK-NEXT:    [[ZSUB:%.*]] = add nuw nsw <2 x i64> [[ZEXT]], <i64 7, i64 poison>
; CHECK-NEXT:    [[RES:%.*]] = and <2 x i64> [[ZSUB]], <i64 240, i64 poison>
; CHECK-NEXT:    ret <2 x i64> [[RES]]
;
  %zext = zext <2 x i32> %X to <2 x i64>
  %zsub = add <2 x i64> %zext, <i64 7, i64 poison>
  %res = and <2 x i64> %zsub, <i64 240, i64 poison>
  ret <2 x i64> %res
}

define i64 @test37(i32 %X) {
; CHECK-LABEL: @test37(
; CHECK-NEXT:    [[TMP1:%.*]] = mul i32 [[X:%.*]], 7
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 240
; CHECK-NEXT:    [[RES:%.*]] = zext nneg i32 [[TMP2]] to i64
; CHECK-NEXT:    ret i64 [[RES]]
;
  %zext = zext i32 %X to i64
  %zsub = mul i64 %zext, 7
  %res = and i64 %zsub, 240
  ret i64 %res
}

define <2 x i64> @test37_uniform(<2 x i32> %X) {
; CHECK-LABEL: @test37_uniform(
; CHECK-NEXT:    [[TMP1:%.*]] = mul <2 x i32> [[X:%.*]], splat (i32 7)
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], splat (i32 240)
; CHECK-NEXT:    [[RES:%.*]] = zext nneg <2 x i32> [[TMP2]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[RES]]
;
  %zext = zext <2 x i32> %X to <2 x i64>
  %zsub = mul <2 x i64> %zext, <i64 7, i64 7>
  %res = and <2 x i64> %zsub, <i64 240, i64 240>
  ret <2 x i64> %res
}

define <2 x i64> @test37_nonuniform(<2 x i32> %X) {
; CHECK-LABEL: @test37_nonuniform(
; CHECK-NEXT:    [[ZEXT:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64>
; CHECK-NEXT:    [[ZSUB:%.*]] = mul nuw nsw <2 x i64> [[ZEXT]], <i64 7, i64 9>
; CHECK-NEXT:    [[RES:%.*]] = and <2 x i64> [[ZSUB]], <i64 240, i64 110>
; CHECK-NEXT:    ret <2 x i64> [[RES]]
;
  %zext = zext <2 x i32> %X to <2 x i64>
  %zsub = mul <2 x i64> %zext, <i64 7, i64 9>
  %res = and <2 x i64> %zsub, <i64 240, i64 110>
  ret <2 x i64> %res
}

define i64 @test38(i32 %X) {
; CHECK-LABEL: @test38(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
; CHECK-NEXT:    [[RES:%.*]] = zext nneg i32 [[TMP1]] to i64
; CHECK-NEXT:    ret i64 [[RES]]
;
  %zext = zext i32 %X to i64
  %zsub = xor i64 %zext, 7
  %res = and i64 %zsub, 240
  ret i64 %res
}

define i64 @test39(i32 %X) {
; CHECK-LABEL: @test39(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
; CHECK-NEXT:    [[RES:%.*]] = zext nneg i32 [[TMP1]] to i64
; CHECK-NEXT:    ret i64 [[RES]]
;
  %zext = zext i32 %X to i64
  %zsub = or i64 %zext, 7
  %res = and i64 %zsub, 240
  ret i64 %res
}

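; A low-mask matching the zext source width lets the whole binop be narrowed:
; (binop (zext X), Y) & (2^n - 1) --> zext (binop X, (trunc Y)).
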
define i32 @lowmask_add_zext(i8 %x, i32 %y) {
; CHECK-LABEL: @lowmask_add_zext(
; CHECK-NEXT:    [[Y_TR:%.*]] = trunc i32 [[Y:%.*]] to i8
; CHECK-NEXT:    [[BO_NARROW:%.*]] = add i8 [[X:%.*]], [[Y_TR]]
; CHECK-NEXT:    [[R:%.*]] = zext i8 [[BO_NARROW]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %zx = zext i8 %x to i32
  %bo = add i32 %zx, %y
  %r = and i32 %bo, 255
  ret i32 %r
}

define i32 @lowmask_add_zext_commute(i16 %x, i32 %p) {
; CHECK-LABEL: @lowmask_add_zext_commute(
; CHECK-NEXT:    [[Y:%.*]] = mul i32 [[P:%.*]], [[P]]
; CHECK-NEXT:    [[Y_TR:%.*]] = trunc i32 [[Y]] to i16
; CHECK-NEXT:    [[BO_NARROW:%.*]] = add i16 [[X:%.*]], [[Y_TR]]
; CHECK-NEXT:    [[R:%.*]] = zext i16 [[BO_NARROW]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %y = mul i32 %p, %p ; thwart complexity-based canonicalization
  %zx = zext i16 %x to i32
  %bo = add i32 %y, %zx
  %r = and i32 %bo, 65535
  ret i32 %r
}

; negative test - the mask constant must match the zext source type

define i32 @lowmask_add_zext_wrong_mask(i8 %x, i32 %y) {
; CHECK-LABEL: @lowmask_add_zext_wrong_mask(
; CHECK-NEXT:    [[ZX:%.*]] = zext i8 [[X:%.*]] to i32
; CHECK-NEXT:    [[BO:%.*]] = add i32 [[Y:%.*]], [[ZX]]
; CHECK-NEXT:    [[R:%.*]] = and i32 [[BO]], 511
; CHECK-NEXT:    ret i32 [[R]]
;
  %zx = zext i8 %x to i32
  %bo = add i32 %zx, %y
  %r = and i32 %bo, 511
  ret i32 %r
}

; negative test - extra use

define i32 @lowmask_add_zext_use1(i8 %x, i32 %y) {
; CHECK-LABEL: @lowmask_add_zext_use1(
; CHECK-NEXT:    [[ZX:%.*]] = zext i8 [[X:%.*]] to i32
; CHECK-NEXT:    call void @use32(i32 [[ZX]])
; CHECK-NEXT:    [[BO:%.*]] = add i32 [[Y:%.*]], [[ZX]]
; CHECK-NEXT:    [[R:%.*]] = and i32 [[BO]], 255
; CHECK-NEXT:    ret i32 [[R]]
;
  %zx = zext i8 %x to i32
  call void @use32(i32 %zx)
  %bo = add i32 %zx, %y
  %r = and i32 %bo, 255
  ret i32 %r
}

; negative test - extra use

define i32 @lowmask_add_zext_use2(i8 %x, i32 %y) {
; CHECK-LABEL: @lowmask_add_zext_use2(
; CHECK-NEXT:    [[ZX:%.*]] = zext i8 [[X:%.*]] to i32
; CHECK-NEXT:    [[BO:%.*]] = add i32 [[Y:%.*]], [[ZX]]
; CHECK-NEXT:    call void @use32(i32 [[BO]])
; CHECK-NEXT:    [[R:%.*]] = and i32 [[BO]], 255
; CHECK-NEXT:    ret i32 [[R]]
;
  %zx = zext i8 %x to i32
  %bo = add i32 %zx, %y
  call void @use32(i32 %bo)
  %r = and i32 %bo, 255
  ret i32 %r
}

; vector splats work too

define <2 x i32> @lowmask_sub_zext(<2 x i4> %x, <2 x i32> %y) {
; CHECK-LABEL: @lowmask_sub_zext(
; CHECK-NEXT:    [[Y_TR:%.*]] = trunc <2 x i32> [[Y:%.*]] to <2 x i4>
; CHECK-NEXT:    [[BO_NARROW:%.*]] = sub <2 x i4> [[X:%.*]], [[Y_TR]]
; CHECK-NEXT:    [[R:%.*]] = zext <2 x i4> [[BO_NARROW]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %zx = zext <2 x i4> %x to <2 x i32>
  %bo = sub <2 x i32> %zx, %y
  %r = and <2 x i32> %bo, <i32 15, i32 15>
  ret <2 x i32> %r
}

; weird types are allowed

define i17 @lowmask_sub_zext_commute(i5 %x, i17 %y) {
; CHECK-LABEL: @lowmask_sub_zext_commute(
; CHECK-NEXT:    [[Y_TR:%.*]] = trunc i17 [[Y:%.*]] to i5
; CHECK-NEXT:    [[BO_NARROW:%.*]] = sub i5 [[Y_TR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = zext i5 [[BO_NARROW]] to i17
; CHECK-NEXT:    ret i17 [[R]]
;
  %zx = zext i5 %x to i17
  %bo = sub i17 %y, %zx
  %r = and i17 %bo, 31
  ret i17 %r
}

define i32 @lowmask_mul_zext(i8 %x, i32 %y) {
; CHECK-LABEL: @lowmask_mul_zext(
; CHECK-NEXT:    [[Y_TR:%.*]] = trunc i32 [[Y:%.*]] to i8
; CHECK-NEXT:    [[BO_NARROW:%.*]] = mul i8 [[X:%.*]], [[Y_TR]]
; CHECK-NEXT:    [[R:%.*]] = zext i8 [[BO_NARROW]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %zx = zext i8 %x to i32
  %bo = mul i32 %zx, %y
  %r = and i32 %bo, 255
  ret i32 %r
}

define i32 @lowmask_xor_zext_commute(i8 %x, i32 %p) {
; CHECK-LABEL: @lowmask_xor_zext_commute(
; CHECK-NEXT:    [[Y:%.*]] = mul i32 [[P:%.*]], [[P]]
; CHECK-NEXT:    [[Y_TR:%.*]] = trunc i32 [[Y]] to i8
; CHECK-NEXT:    [[BO_NARROW:%.*]] = xor i8 [[X:%.*]], [[Y_TR]]
; CHECK-NEXT:    [[R:%.*]] = zext i8 [[BO_NARROW]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %y = mul i32 %p, %p ; thwart complexity-based canonicalization
  %zx = zext i8 %x to i32
  %bo = xor i32 %y, %zx
  %r = and i32 %bo, 255
  ret i32 %r
}

define i24 @lowmask_or_zext_commute(i16 %x, i24 %y) {
; CHECK-LABEL: @lowmask_or_zext_commute(
; CHECK-NEXT:    [[Y_TR:%.*]] = trunc i24 [[Y:%.*]] to i16
; CHECK-NEXT:    [[BO_NARROW:%.*]] = or i16 [[X:%.*]], [[Y_TR]]
; CHECK-NEXT:    [[R:%.*]] = zext i16 [[BO_NARROW]] to i24
; CHECK-NEXT:    ret i24 [[R]]
;
  %zx = zext i16 %x to i24
  %bo = or i24 %y, %zx
  %r = and i24 %bo, 65535
  ret i24 %r
}

define i32 @test40(i1 %C) {
; CHECK-LABEL: @test40(
; CHECK-NEXT:    [[A:%.*]] = select i1 [[C:%.*]], i32 104, i32 10
; CHECK-NEXT:    ret i32 [[A]]
;
  %A = select i1 %C, i32 1000, i32 10
  %V = and i32 %A, 123
  ret i32 %V
}

define <2 x i32> @test40vec(i1 %C) {
; CHECK-LABEL: @test40vec(
; CHECK-NEXT:    [[A:%.*]] = select i1 [[C:%.*]], <2 x i32> splat (i32 104), <2 x i32> splat (i32 10)
; CHECK-NEXT:    ret <2 x i32> [[A]]
;
  %A = select i1 %C, <2 x i32> <i32 1000, i32 1000>, <2 x i32> <i32 10, i32 10>
  %V = and <2 x i32> %A, <i32 123, i32 123>
  ret <2 x i32> %V
}

define <2 x i32> @test40vec2(i1 %C) {
; CHECK-LABEL: @test40vec2(
; CHECK-NEXT:    [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 104, i32 324>, <2 x i32> <i32 10, i32 12>
; CHECK-NEXT:    ret <2 x i32> [[V]]
;
  %A = select i1 %C, <2 x i32> <i32 1000, i32 2500>, <2 x i32> <i32 10, i32 30>
  %V = and <2 x i32> %A, <i32 123, i32 333>
  ret <2 x i32> %V
}

define i32 @test41(i1 %which) {
; CHECK-LABEL: @test41(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
; CHECK:       delay:
; CHECK-NEXT:    br label [[FINAL]]
; CHECK:       final:
; CHECK-NEXT:    [[A:%.*]] = phi i32 [ 104, [[ENTRY:%.*]] ], [ 10, [[DELAY]] ]
; CHECK-NEXT:    ret i32 [[A]]
;
entry:
  br i1 %which, label %final, label %delay

delay:
  br label %final

final:
  %A = phi i32 [ 1000, %entry ], [ 10, %delay ]
  %value = and i32 %A, 123
  ret i32 %value
}

define <2 x i32> @test41vec(i1 %which) {
; CHECK-LABEL: @test41vec(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
; CHECK:       delay:
; CHECK-NEXT:    br label [[FINAL]]
; CHECK:       final:
; CHECK-NEXT:    [[A:%.*]] = phi <2 x i32> [ splat (i32 104), [[ENTRY:%.*]] ], [ splat (i32 10), [[DELAY]] ]
; CHECK-NEXT:    ret <2 x i32> [[A]]
;
entry:
  br i1 %which, label %final, label %delay

delay:
  br label %final

final:
  %A = phi <2 x i32> [ <i32 1000, i32 1000>, %entry ], [ <i32 10, i32 10>, %delay ]
  %value = and <2 x i32> %A, <i32 123, i32 123>
  ret <2 x i32> %value
}

define <2 x i32> @test41vec2(i1 %which) {
; CHECK-LABEL: @test41vec2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
; CHECK:       delay:
; CHECK-NEXT:    br label [[FINAL]]
; CHECK:       final:
; CHECK-NEXT:    [[A:%.*]] = phi <2 x i32> [ <i32 104, i32 324>, [[ENTRY:%.*]] ], [ <i32 10, i32 12>, [[DELAY]] ]
; CHECK-NEXT:    ret <2 x i32> [[A]]
;
entry:
  br i1 %which, label %final, label %delay

delay:
  br label %final

final:
  %A = phi <2 x i32> [ <i32 1000, i32 2500>, %entry ], [ <i32 10, i32 30>, %delay ]
  %value = and <2 x i32> %A, <i32 123, i32 333>
  ret <2 x i32> %value
}

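; (~a ^ f) & (a | f) == ~(a ^ f) & (a | f): the bits of a and f agree and at
; least one is set, so both are set - that is just a & f.
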
define i32 @test42(i32 %a, i32 %c, i32 %d) {
; CHECK-LABEL: @test42(
; CHECK-NEXT:    [[FORCE:%.*]] = mul i32 [[C:%.*]], [[D:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[FORCE]], [[A:%.*]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %force = mul i32 %c, %d ; forces the complexity sorting
  %or = or i32 %a, %force
  %nota = xor i32 %a, -1
  %xor = xor i32 %nota, %force
  %and = and i32 %xor, %or
  ret i32 %and
}

define i32 @test43(i32 %a, i32 %c, i32 %d) {
; CHECK-LABEL: @test43(
; CHECK-NEXT:    [[FORCE:%.*]] = mul i32 [[C:%.*]], [[D:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[FORCE]], [[A:%.*]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %force = mul i32 %c, %d ; forces the complexity sorting
  %or = or i32 %a, %force
  %nota = xor i32 %a, -1
  %xor = xor i32 %nota, %force
  %and = and i32 %or, %xor
  ret i32 %and
}

; (~y | x) & y -> x & y
define i32 @test44(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: @test44(
; CHECK-NEXT:    [[A:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i32 [[A]]
;
  %n = xor i32 %y, -1
  %o = or i32 %n, %x
  %a = and i32 %o, %y
  ret i32 %a
}

; (x | ~y) & y -> x & y
define i32 @test45(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: @test45(
; CHECK-NEXT:    [[A:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i32 [[A]]
;
  %n = xor i32 %y, -1
  %o = or i32 %x, %n
  %a = and i32 %o, %y
  ret i32 %a
}

; y & (~y | x) -> y & x
define i32 @test46(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: @test46(
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    ret i32 [[A]]
;
  %n = xor i32 %y, -1
  %o = or i32 %n, %x
  %a = and i32 %y, %o
  ret i32 %a
}

; y & (x | ~y) -> y & x
define i32 @test47(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: @test47(
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    ret i32 [[A]]
;
  %n = xor i32 %y, -1
  %o = or i32 %x, %n
  %a = and i32 %y, %o
  ret i32 %a
}

; In the next 4 tests, vary the types and predicates for extra coverage.
; (X & (Y | ~X)) -> (X & Y), where 'not' is an inverted cmp

define i1 @and_orn_cmp_1(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @and_orn_cmp_1(
; CHECK-NEXT:    [[X:%.*]] = icmp sgt i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt i32 [[C:%.*]], 42
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %x = icmp sgt i32 %a, %b
  %x_inv = icmp sle i32 %a, %b
  %y = icmp ugt i32 %c, 42      ; thwart complexity-based ordering
  %or = or i1 %y, %x_inv
  %and = and i1 %x, %or
  ret i1 %and
}

define i1 @and_orn_cmp_1_logical(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @and_orn_cmp_1_logical(
; CHECK-NEXT:    [[X:%.*]] = icmp sgt i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt i32 [[C:%.*]], 42
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[X]], i1 [[Y]], i1 false
; CHECK-NEXT:    ret i1 [[AND]]
;
  %x = icmp sgt i32 %a, %b
  %x_inv = icmp sle i32 %a, %b
  %y = icmp ugt i32 %c, 42      ; thwart complexity-based ordering
  %or = select i1 %y, i1 true, i1 %x_inv
  %and = select i1 %x, i1 %or, i1 false
  ret i1 %and
}

; Commute the 'and':
; ((Y | ~X) & X) -> (X & Y), where 'not' is an inverted cmp

define <2 x i1> @and_orn_cmp_2(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) {
; CHECK-LABEL: @and_orn_cmp_2(
; CHECK-NEXT:    [[X:%.*]] = icmp sge <2 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt <2 x i32> [[C:%.*]], <i32 42, i32 47>
; CHECK-NEXT:    [[AND:%.*]] = and <2 x i1> [[Y]], [[X]]
; CHECK-NEXT:    ret <2 x i1> [[AND]]
;
  %x = icmp sge <2 x i32> %a, %b
  %x_inv = icmp slt <2 x i32> %a, %b
  %y = icmp ugt <2 x i32> %c, <i32 42, i32 47>      ; thwart complexity-based ordering
  %or = or <2 x i1> %y, %x_inv
  %and = and <2 x i1> %or, %x
  ret <2 x i1> %and
}

; Commute the 'or':
; (X & (~X | Y)) -> (X & Y), where 'not' is an inverted cmp

define i1 @and_orn_cmp_3(i72 %a, i72 %b, i72 %c) {
; CHECK-LABEL: @and_orn_cmp_3(
; CHECK-NEXT:    [[X:%.*]] = icmp ugt i72 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt i72 [[C:%.*]], 42
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %x = icmp ugt i72 %a, %b
  %x_inv = icmp ule i72 %a, %b
  %y = icmp ugt i72 %c, 42      ; thwart complexity-based ordering
  %or = or i1 %x_inv, %y
  %and = and i1 %x, %or
  ret i1 %and
}

define i1 @and_orn_cmp_3_logical(i72 %a, i72 %b, i72 %c) {
; CHECK-LABEL: @and_orn_cmp_3_logical(
; CHECK-NEXT:    [[X:%.*]] = icmp ugt i72 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt i72 [[C:%.*]], 42
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[X]], i1 [[Y]], i1 false
; CHECK-NEXT:    ret i1 [[AND]]
;
  %x = icmp ugt i72 %a, %b
  %x_inv = icmp ule i72 %a, %b
  %y = icmp ugt i72 %c, 42      ; thwart complexity-based ordering
  %or = select i1 %x_inv, i1 true, i1 %y
  %and = select i1 %x, i1 %or, i1 false
  ret i1 %and
}

; Commute the 'and':
; ((~X | Y) & X) -> (X & Y), where 'not' is an inverted cmp

define <3 x i1> @or_andn_cmp_4(<3 x i32> %a, <3 x i32> %b, <3 x i32> %c) {
; CHECK-LABEL: @or_andn_cmp_4(
; CHECK-NEXT:    [[X:%.*]] = icmp eq <3 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt <3 x i32> [[C:%.*]], <i32 42, i32 43, i32 -1>
; CHECK-NEXT:    [[AND:%.*]] = and <3 x i1> [[Y]], [[X]]
; CHECK-NEXT:    ret <3 x i1> [[AND]]
;
  %x = icmp eq <3 x i32> %a, %b
  %x_inv = icmp ne <3 x i32> %a, %b
  %y = icmp ugt <3 x i32> %c, <i32 42, i32 43, i32 -1>      ; thwart complexity-based ordering
  %or = or <3 x i1> %x_inv, %y
  %and = and <3 x i1> %or, %x
  ret <3 x i1> %and
}

; In the next 4 tests, vary the types and predicates for extra coverage.
; (~X & (Y | X)) -> (~X & Y), where 'not' is an inverted cmp

define i1 @andn_or_cmp_1(i37 %a, i37 %b, i37 %c) {
; CHECK-LABEL: @andn_or_cmp_1(
; CHECK-NEXT:    [[X_INV:%.*]] = icmp sle i37 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt i37 [[C:%.*]], 42
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[X_INV]], [[Y]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %x = icmp sgt i37 %a, %b
  %x_inv = icmp sle i37 %a, %b
  %y = icmp ugt i37 %c, 42      ; thwart complexity-based ordering
  %or = or i1 %y, %x
  %and = and i1 %x_inv, %or
  ret i1 %and
}

define i1 @andn_or_cmp_1_logical(i37 %a, i37 %b, i37 %c) {
; CHECK-LABEL: @andn_or_cmp_1_logical(
; CHECK-NEXT:    [[X_INV:%.*]] = icmp sle i37 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt i37 [[C:%.*]], 42
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[X_INV]], i1 [[Y]], i1 false
; CHECK-NEXT:    ret i1 [[AND]]
;
  %x = icmp sgt i37 %a, %b
  %x_inv = icmp sle i37 %a, %b
  %y = icmp ugt i37 %c, 42      ; thwart complexity-based ordering
  %or = select i1 %y, i1 true, i1 %x
  %and = select i1 %x_inv, i1 %or, i1 false
  ret i1 %and
}

; Commute the 'and':
; ((Y | X) & ~X) -> (~X & Y), where 'not' is an inverted cmp

define i1 @andn_or_cmp_2(i16 %a, i16 %b, i16 %c) {
; CHECK-LABEL: @andn_or_cmp_2(
; CHECK-NEXT:    [[X_INV:%.*]] = icmp slt i16 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt i16 [[C:%.*]], 42
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[Y]], [[X_INV]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %x = icmp sge i16 %a, %b
  %x_inv = icmp slt i16 %a, %b
  %y = icmp ugt i16 %c, 42      ; thwart complexity-based ordering
  %or = or i1 %y, %x
  %and = and i1 %or, %x_inv
  ret i1 %and
}

define i1 @andn_or_cmp_2_logical(i16 %a, i16 %b, i16 %c) {
; CHECK-LABEL: @andn_or_cmp_2_logical(
; CHECK-NEXT:    [[X_INV:%.*]] = icmp slt i16 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt i16 [[C:%.*]], 42
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[Y]], i1 [[X_INV]], i1 false
; CHECK-NEXT:    ret i1 [[AND]]
;
  %x = icmp sge i16 %a, %b
  %x_inv = icmp slt i16 %a, %b
  %y = icmp ugt i16 %c, 42      ; thwart complexity-based ordering
  %or = select i1 %y, i1 true, i1 %x
  %and = select i1 %or, i1 %x_inv, i1 false
  ret i1 %and
}

; Commute the 'or':
; (~X & (X | Y)) -> (~X & Y), where 'not' is an inverted cmp

define <4 x i1> @andn_or_cmp_3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: @andn_or_cmp_3(
; CHECK-NEXT:    [[X_INV:%.*]] = icmp ule <4 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt <4 x i32> [[C:%.*]], <i32 42, i32 0, i32 1, i32 -1>
; CHECK-NEXT:    [[AND:%.*]] = and <4 x i1> [[X_INV]], [[Y]]
; CHECK-NEXT:    ret <4 x i1> [[AND]]
;
  %x = icmp ugt <4 x i32> %a, %b
  %x_inv = icmp ule <4 x i32> %a, %b
  %y = icmp ugt <4 x i32> %c, <i32 42, i32 0, i32 1, i32 -1>      ; thwart complexity-based ordering
  %or = or <4 x i1> %x, %y
  %and = and <4 x i1> %x_inv, %or
  ret <4 x i1> %and
}

; Commute the 'and':
; ((X | Y) & ~X) -> (~X & Y), where 'not' is an inverted cmp

define i1 @andn_or_cmp_4(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @andn_or_cmp_4(
; CHECK-NEXT:    [[X_INV:%.*]] = icmp ne i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt i32 [[C:%.*]], 42
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[Y]], [[X_INV]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %x = icmp eq i32 %a, %b
  %x_inv = icmp ne i32 %a, %b
  %y = icmp ugt i32 %c, 42      ; thwart complexity-based ordering
  %or = or i1 %x, %y
  %and = and i1 %or, %x_inv
  ret i1 %and
}

define i1 @andn_or_cmp_4_logical(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @andn_or_cmp_4_logical(
; CHECK-NEXT:    [[X_INV:%.*]] = icmp ne i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = icmp ugt i32 [[C:%.*]], 42
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[X_INV]], i1 [[Y]], i1 false
; CHECK-NEXT:    ret i1 [[AND]]
;
  %x = icmp eq i32 %a, %b
  %x_inv = icmp ne i32 %a, %b
  %y = icmp ugt i32 %c, 42      ; thwart complexity-based ordering
  %or = select i1 %x, i1 true, i1 %y
  %and = select i1 %or, i1 %x_inv, i1 false
  ret i1 %and
}

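; (sext (ashr X, C)) & lowmask --> lshr (sext X), C when the mask keeps exactly
; the low (destwidth - C) bits, i.e. mask == -1 u>> C.
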
define i32 @lowbitmask_casted_shift(i8 %x) {
; CHECK-LABEL: @lowbitmask_casted_shift(
; CHECK-NEXT:    [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32
; CHECK-NEXT:    [[R:%.*]] = lshr i32 [[TMP1]], 1
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = ashr i8 %x, 1
  %s = sext i8 %a to i32
  %r = and i32 %s, 2147483647
  ret i32 %r
}

; Negative test - mask constant is too big.

define i32 @lowbitmask_casted_shift_wrong_mask1(i8 %x) {
; CHECK-LABEL: @lowbitmask_casted_shift_wrong_mask1(
; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 2
; CHECK-NEXT:    [[S:%.*]] = sext i8 [[A]] to i32
; CHECK-NEXT:    [[R:%.*]] = and i32 [[S]], 2147483647
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = ashr i8 %x, 2
  %s = sext i8 %a to i32
  %r = and i32 %s, 2147483647 ; 0x7fffffff
  ret i32 %r
}

; Negative test - mask constant is too small.

define i32 @lowbitmask_casted_shift_wrong_mask2(i8 %x) {
; CHECK-LABEL: @lowbitmask_casted_shift_wrong_mask2(
; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 2
; CHECK-NEXT:    [[S:%.*]] = sext i8 [[A]] to i32
; CHECK-NEXT:    [[R:%.*]] = and i32 [[S]], 536870911
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = ashr i8 %x, 2
  %s = sext i8 %a to i32
  %r = and i32 %s, 536870911  ; 0x1fffffff
  ret i32 %r
}

; Extra use of shift is ok.

define i32 @lowbitmask_casted_shift_use1(i8 %x) {
; CHECK-LABEL: @lowbitmask_casted_shift_use1(
; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT:    call void @use8(i8 [[A]])
; CHECK-NEXT:    [[TMP1:%.*]] = sext i8 [[X]] to i32
; CHECK-NEXT:    [[R:%.*]] = lshr i32 [[TMP1]], 3
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = ashr i8 %x, 3
  call void @use8(i8 %a)
  %s = sext i8 %a to i32
  %r = and i32 %s, 536870911
  ret i32 %r
}

; Negative test - extra use of sext requires more instructions.

define i32 @lowbitmask_casted_shift_use2(i8 %x) {
; CHECK-LABEL: @lowbitmask_casted_shift_use2(
; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT:    [[S:%.*]] = sext i8 [[A]] to i32
; CHECK-NEXT:    call void @use32(i32 [[S]])
; CHECK-NEXT:    [[R:%.*]] = and i32 [[S]], 536870911
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = ashr i8 %x, 3
  %s = sext i8 %a to i32
  call void @use32(i32 %s)
  %r = and i32 %s, 536870911
  ret i32 %r
}

; Vectors/weird types are ok.

define <2 x i59> @lowbitmask_casted_shift_vec_splat(<2 x i47> %x) {
; CHECK-LABEL: @lowbitmask_casted_shift_vec_splat(
; CHECK-NEXT:    [[TMP1:%.*]] = sext <2 x i47> [[X:%.*]] to <2 x i59>
; CHECK-NEXT:    [[R:%.*]] = lshr <2 x i59> [[TMP1]], splat (i59 5)
; CHECK-NEXT:    ret <2 x i59> [[R]]
;
  %a = ashr <2 x i47> %x, <i47 5, i47 5>
  %s = sext <2 x i47> %a to <2 x i59>
  %r = and <2 x i59> %s, <i59 18014398509481983, i59 18014398509481983>  ;  -1 u>> 5 == 0x3f_ffff_ffff_ffff
  ret <2 x i59> %r
}

define i32 @lowmask_sext_in_reg(i32 %x) {
; CHECK-LABEL: @lowmask_sext_in_reg(
; CHECK-NEXT:    [[L:%.*]] = shl i32 [[X:%.*]], 20
; CHECK-NEXT:    [[R:%.*]] = ashr exact i32 [[L]], 20
; CHECK-NEXT:    call void @use32(i32 [[R]])
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X]], 4095
; CHECK-NEXT:    ret i32 [[AND]]
;
  %l = shl i32 %x, 20
  %r = ashr i32 %l, 20
  call void @use32(i32 %r)
  %and = and i32 %r, 4095
  ret i32 %and
}

; Mismatched shift amounts, but the mask op can be replaced by a shift.

define i32 @lowmask_not_sext_in_reg(i32 %x) {
; CHECK-LABEL: @lowmask_not_sext_in_reg(
; CHECK-NEXT:    [[L:%.*]] = shl i32 [[X:%.*]], 19
; CHECK-NEXT:    [[R:%.*]] = ashr i32 [[L]], 20
; CHECK-NEXT:    call void @use32(i32 [[R]])
; CHECK-NEXT:    [[AND:%.*]] = lshr i32 [[L]], 20
; CHECK-NEXT:    ret i32 [[AND]]
;
  %l = shl i32 %x, 19
  %r = ashr i32 %l, 20
  call void @use32(i32 %r)
  %and = and i32 %r, 4095
  ret i32 %and
}

; Negative test - too much shift for mask

define i32 @not_lowmask_sext_in_reg(i32 %x) {
; CHECK-LABEL: @not_lowmask_sext_in_reg(
; CHECK-NEXT:    [[L:%.*]] = shl i32 [[X:%.*]], 20
; CHECK-NEXT:    [[R:%.*]] = ashr exact i32 [[L]], 20
; CHECK-NEXT:    call void @use32(i32 [[R]])
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[R]], 4096
; CHECK-NEXT:    ret i32 [[AND]]
;
  %l = shl i32 %x, 20
  %r = ashr i32 %l, 20
  call void @use32(i32 %r)
  %and = and i32 %r, 4096
  ret i32 %and
}

; Negative test - too much shift for mask

define i32 @not_lowmask_sext_in_reg2(i32 %x) {
; CHECK-LABEL: @not_lowmask_sext_in_reg2(
; CHECK-NEXT:    [[L:%.*]] = shl i32 [[X:%.*]], 21
; CHECK-NEXT:    [[R:%.*]] = ashr exact i32 [[L]], 21
; CHECK-NEXT:    call void @use32(i32 [[R]])
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[R]], 4095
; CHECK-NEXT:    ret i32 [[AND]]
;
  %l = shl i32 %x, 21
  %r = ashr i32 %l, 21
  call void @use32(i32 %r)
  %and = and i32 %r, 4095
  ret i32 %and
}

define <2 x i32> @lowmask_sext_in_reg_splat(<2 x i32> %x, ptr %p) {
; CHECK-LABEL: @lowmask_sext_in_reg_splat(
; CHECK-NEXT:    [[L:%.*]] = shl <2 x i32> [[X:%.*]], splat (i32 20)
; CHECK-NEXT:    [[R:%.*]] = ashr exact <2 x i32> [[L]], splat (i32 20)
; CHECK-NEXT:    store <2 x i32> [[R]], ptr [[P:%.*]], align 8
; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[X]], splat (i32 4095)
; CHECK-NEXT:    ret <2 x i32> [[AND]]
;
  %l = shl <2 x i32> %x, <i32 20, i32 20>
  %r = ashr <2 x i32> %l, <i32 20, i32 20>
  store <2 x i32> %r, ptr %p
  %and = and <2 x i32> %r, <i32 4095, i32 4095>
  ret <2 x i32> %and
}

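; An add only changes bits at or above its constant's lowest set bit (carries
; propagate upward), so a mask lying entirely below that bit makes the add
; dead: (x + 0xc0) & 0x3f == x & 0x3f.
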
define i8 @lowmask_add(i8 %x) {
; CHECK-LABEL: @lowmask_add(
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], -64
; CHECK-NEXT:    call void @use8(i8 [[A]])
; CHECK-NEXT:    [[R:%.*]] = and i8 [[X]], 32
; CHECK-NEXT:    ret i8 [[R]]
;
  %a = add i8 %x, -64 ; 0xc0
  call void @use8(i8 %a)
  %r = and i8 %a, 32 ; 0x20
  ret i8 %r
}

define i8 @lowmask_add_2(i8 %x) {
; CHECK-LABEL: @lowmask_add_2(
; CHECK-NEXT:    [[R:%.*]] = and i8 [[X:%.*]], 63
; CHECK-NEXT:    ret i8 [[R]]
;
  %a = add i8 %x, -64 ; 0xc0
  %r = and i8 %a, 63 ; 0x3f
  ret i8 %r
}

define i8 @lowmask_add_2_uses(i8 %x) {
; CHECK-LABEL: @lowmask_add_2_uses(
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], -64
; CHECK-NEXT:    call void @use8(i8 [[A]])
; CHECK-NEXT:    [[R:%.*]] = and i8 [[X]], 63
; CHECK-NEXT:    ret i8 [[R]]
;
  %a = add i8 %x, -64 ; 0xc0
  call void @use8(i8 %a)
  %r = and i8 %a, 63 ; 0x3f
  ret i8 %r
}

define <2 x i8> @lowmask_add_2_splat(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @lowmask_add_2_splat(
; CHECK-NEXT:    [[A:%.*]] = add <2 x i8> [[X:%.*]], splat (i8 -64)
; CHECK-NEXT:    store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT:    [[R:%.*]] = and <2 x i8> [[X]], splat (i8 63)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a = add <2 x i8> %x, <i8 -64, i8 -64> ; 0xc0
  store <2 x i8> %a, ptr %p
  %r = and <2 x i8> %a, <i8 63, i8 63> ; 0x3f
  ret <2 x i8> %r
}

; Negative test - mask overlaps low bit of add

define i8 @not_lowmask_add(i8 %x) {
; CHECK-LABEL: @not_lowmask_add(
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], -64
; CHECK-NEXT:    call void @use8(i8 [[A]])
; CHECK-NEXT:    [[R:%.*]] = and i8 [[A]], 64
; CHECK-NEXT:    ret i8 [[R]]
;
  %a = add i8 %x, -64 ; 0xc0
  call void @use8(i8 %a)
  %r = and i8 %a, 64 ; 0x40
  ret i8 %r
}

; Negative test - mask overlaps low bit of add

define i8 @not_lowmask_add2(i8 %x) {
; CHECK-LABEL: @not_lowmask_add2(
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], -96
; CHECK-NEXT:    call void @use8(i8 [[A]])
; CHECK-NEXT:    [[R:%.*]] = and i8 [[A]], 63
; CHECK-NEXT:    ret i8 [[R]]
;
  %a = add i8 %x, -96 ; 0xe0
  call void @use8(i8 %a)
  %r = and i8 %a, 63 ; 0x3f
  ret i8 %r
}

define <2 x i8> @lowmask_add_splat(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @lowmask_add_splat(
; CHECK-NEXT:    [[A:%.*]] = add <2 x i8> [[X:%.*]], splat (i8 -64)
; CHECK-NEXT:    store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT:    [[R:%.*]] = and <2 x i8> [[X]], splat (i8 32)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a = add <2 x i8> %x, <i8 -64, i8 -64> ; 0xc0
  store <2 x i8> %a, ptr %p
  %r = and <2 x i8> %a, <i8 32, i8 32> ; 0x20
  ret <2 x i8> %r
}

define <2 x i8> @lowmask_add_splat_poison(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @lowmask_add_splat_poison(
; CHECK-NEXT:    [[A:%.*]] = add <2 x i8> [[X:%.*]], <i8 -64, i8 poison>
; CHECK-NEXT:    store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT:    [[R:%.*]] = and <2 x i8> [[X]], <i8 poison, i8 32>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a = add <2 x i8> %x, <i8 -64, i8 poison> ; 0xc0
  store <2 x i8> %a, ptr %p
  %r = and <2 x i8> %a, <i8 poison, i8 32> ; 0x20
  ret <2 x i8> %r
}

define <2 x i8> @lowmask_add_vec(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @lowmask_add_vec(
; CHECK-NEXT:    [[A:%.*]] = add <2 x i8> [[X:%.*]], <i8 -96, i8 -64>
; CHECK-NEXT:    store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT:    [[R:%.*]] = and <2 x i8> [[A]], <i8 16, i8 32>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a = add <2 x i8> %x, <i8 -96, i8 -64> ; 0xe0, 0xc0
  store <2 x i8> %a, ptr %p
  %r = and <2 x i8> %a, <i8 16, i8 32> ; 0x10, 0x20
  ret <2 x i8> %r
}

; Only one bit set
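; Adding a power of two toggles exactly that bit (any carry moves upward), so
; (A + 16) & 16 == (A & 16) ^ 16.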
define i8 @flip_masked_bit(i8 %A) {
; CHECK-LABEL: @flip_masked_bit(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[A:%.*]], 16
; CHECK-NEXT:    [[C:%.*]] = xor i8 [[TMP1]], 16
; CHECK-NEXT:    ret i8 [[C]]
;
  %B = add i8 %A, 16
  %C = and i8 %B, 16
  ret i8 %C
}

define <2 x i8> @flip_masked_bit_uniform(<2 x i8> %A) {
; CHECK-LABEL: @flip_masked_bit_uniform(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> [[A:%.*]], splat (i8 16)
; CHECK-NEXT:    [[C:%.*]] = xor <2 x i8> [[TMP1]], splat (i8 16)
; CHECK-NEXT:    ret <2 x i8> [[C]]
;
  %B = add <2 x i8> %A, <i8 16, i8 16>
  %C = and <2 x i8> %B, <i8 16, i8 16>
  ret <2 x i8> %C
}

define <2 x i8> @flip_masked_bit_poison(<2 x i8> %A) {
; CHECK-LABEL: @flip_masked_bit_poison(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[A:%.*]], splat (i8 -1)
; CHECK-NEXT:    [[C:%.*]] = and <2 x i8> [[TMP1]], <i8 16, i8 poison>
; CHECK-NEXT:    ret <2 x i8> [[C]]
;
  %B = add <2 x i8> %A, <i8 16, i8 poison>
  %C = and <2 x i8> %B, <i8 16, i8 poison>
  ret <2 x i8> %C
}

define <2 x i8> @flip_masked_bit_nonuniform(<2 x i8> %A) {
; CHECK-LABEL: @flip_masked_bit_nonuniform(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[A:%.*]], splat (i8 -1)
; CHECK-NEXT:    [[C:%.*]] = and <2 x i8> [[TMP1]], <i8 16, i8 4>
; CHECK-NEXT:    ret <2 x i8> [[C]]
;
  %B = add <2 x i8> %A, <i8 16, i8 4>
  %C = and <2 x i8> %B, <i8 16, i8 4>
  ret <2 x i8> %C
}

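; ashr X, bitwidth-1 is 0 or all-ones depending on the sign of X, so 'and'
; with Y acts as a select between 0 and Y.
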
define i8 @ashr_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @ashr_bitwidth_mask(
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[NEG_OR_ZERO:%.*]] = select i1 [[ISNEG]], i8 [[Y:%.*]], i8 0
; CHECK-NEXT:    ret i8 [[NEG_OR_ZERO]]
;
  %sign = ashr i8 %x, 7
  %neg_or_zero = and i8 %sign, %y
  ret i8 %neg_or_zero
}

define <2 x i8> @ashr_bitwidth_mask_vec_commute(<2 x i8> %x, <2 x i8> %py) {
; CHECK-LABEL: @ashr_bitwidth_mask_vec_commute(
; CHECK-NEXT:    [[Y:%.*]] = mul <2 x i8> [[PY:%.*]], <i8 42, i8 2>
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[NEG_OR_ZERO:%.*]] = select <2 x i1> [[ISNEG]], <2 x i8> [[Y]], <2 x i8> zeroinitializer
; CHECK-NEXT:    ret <2 x i8> [[NEG_OR_ZERO]]
;
  %y = mul <2 x i8> %py, <i8 42, i8 2>      ; thwart complexity-based ordering
  %sign = ashr <2 x i8> %x, <i8 7, i8 7>
  %neg_or_zero = and <2 x i8> %y, %sign
  ret <2 x i8> %neg_or_zero
}

; negative test - extra use

define i8 @ashr_bitwidth_mask_use(i8 %x, i8 %y) {
; CHECK-LABEL: @ashr_bitwidth_mask_use(
; CHECK-NEXT:    [[SIGN:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @use8(i8 [[SIGN]])
; CHECK-NEXT:    [[R:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sign = ashr i8 %x, 7
  call void @use8(i8 %sign)
  %r = and i8 %sign, %y
  ret i8 %r
}

; negative test - wrong shift amount

define i8 @ashr_not_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @ashr_not_bitwidth_mask(
; CHECK-NEXT:    [[SIGN:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT:    [[R:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sign = ashr i8 %x, 6
  %r = and i8 %sign, %y
  ret i8 %r
}

; negative test - wrong shift opcode

define i8 @lshr_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @lshr_bitwidth_mask(
; CHECK-NEXT:    [[SIGN:%.*]] = lshr i8 [[X:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sign = lshr i8 %x, 7
  %r = and i8 %sign, %y
  ret i8 %r
}

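; The same holds through a sext: sext (ashr X, bitwidth-1) is 0 or all-ones in
; the wide type, so the 'and' again selects between 0 and Y.
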
define i16 @signbit_splat_mask(i8 %x, i16 %y) {
; CHECK-LABEL: @signbit_splat_mask(
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[R:%.*]] = select i1 [[ISNEG]], i16 [[Y:%.*]], i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 7
  %s = sext i8 %a to i16
  %r = and i16 %s, %y
  ret i16 %r
}

define <2 x i16> @signbit_splat_mask_commute(<2 x i5> %x, <2 x i16> %p) {
; CHECK-LABEL: @signbit_splat_mask_commute(
; CHECK-NEXT:    [[Y:%.*]] = mul <2 x i16> [[P:%.*]], [[P]]
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt <2 x i5> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[R:%.*]] = select <2 x i1> [[ISNEG]], <2 x i16> [[Y]], <2 x i16> zeroinitializer
; CHECK-NEXT:    ret <2 x i16> [[R]]
;
  %y = mul <2 x i16> %p, %p ; thwart complexity-based canonicalization
  %a = ashr <2 x i5> %x, <i5 4, i5 poison>
  %s = sext <2 x i5> %a to <2 x i16>
  %r = and <2 x i16> %y, %s
  ret <2 x i16> %r
}

define i16 @signbit_splat_mask_use1(i8 %x, i16 %y) {
; CHECK-LABEL: @signbit_splat_mask_use1(
; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @use8(i8 [[A]])
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt i8 [[X]], 0
; CHECK-NEXT:    [[R:%.*]] = select i1 [[ISNEG]], i16 [[Y:%.*]], i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 7
  call void @use8(i8 %a)
  %s = sext i8 %a to i16
  %r = and i16 %s, %y
  ret i16 %r
}

; negative test - extra use

define i16 @signbit_splat_mask_use2(i8 %x, i16 %y) {
; CHECK-LABEL: @signbit_splat_mask_use2(
; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    [[S:%.*]] = sext i8 [[A]] to i16
; CHECK-NEXT:    call void @use16(i16 [[S]])
; CHECK-NEXT:    [[R:%.*]] = and i16 [[Y:%.*]], [[S]]
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 7
  %s = sext i8 %a to i16
  call void @use16(i16 %s)
  %r = and i16 %s, %y
  ret i16 %r
}

; negative test - wrong extend

define i16 @not_signbit_splat_mask1(i8 %x, i16 %y) {
; CHECK-LABEL: @not_signbit_splat_mask1(
; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    [[Z:%.*]] = zext i8 [[A]] to i16
; CHECK-NEXT:    [[R:%.*]] = and i16 [[Y:%.*]], [[Z]]
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 7
  %z = zext i8 %a to i16
  %r = and i16 %z, %y
  ret i16 %r
}

; negative test - wrong shift amount

define i16 @not_signbit_splat_mask2(i8 %x, i16 %y) {
; CHECK-LABEL: @not_signbit_splat_mask2(
; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT:    [[S:%.*]] = sext i8 [[A]] to i16
; CHECK-NEXT:    [[R:%.*]] = and i16 [[Y:%.*]], [[S]]
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 6
  %s = sext i8 %a to i16
  %r = and i16 %s, %y
  ret i16 %r
}

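; Inverted mask: ~(x >> 7) is -1 exactly when %x is non-negative, so the
; 'and' selects the other way: 'x < 0 ? 0 : y'.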
define i8 @not_ashr_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @not_ashr_bitwidth_mask(
; CHECK-NEXT:    [[ISNOTNEG_INV:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[POS_OR_ZERO:%.*]] = select i1 [[ISNOTNEG_INV]], i8 0, i8 [[Y:%.*]]
; CHECK-NEXT:    ret i8 [[POS_OR_ZERO]]
;
  %sign = ashr i8 %x, 7
  %not = xor i8 %sign, -1
  %pos_or_zero = and i8 %not, %y
  ret i8 %pos_or_zero
}

define <2 x i8> @not_ashr_bitwidth_mask_vec_commute(<2 x i8> %x, <2 x i8> %py) {
; CHECK-LABEL: @not_ashr_bitwidth_mask_vec_commute(
; CHECK-NEXT:    [[Y:%.*]] = mul <2 x i8> [[PY:%.*]], <i8 42, i8 2>
; CHECK-NEXT:    [[ISNOTNEG_INV:%.*]] = icmp slt <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[POS_OR_ZERO:%.*]] = select <2 x i1> [[ISNOTNEG_INV]], <2 x i8> zeroinitializer, <2 x i8> [[Y]]
; CHECK-NEXT:    ret <2 x i8> [[POS_OR_ZERO]]
;
  %y = mul <2 x i8> %py, <i8 42, i8 2>      ; thwart complexity-based ordering
  %sign = ashr <2 x i8> %x, <i8 7, i8 7>
  %not = xor <2 x i8> %sign, <i8 -1, i8 -1>
  %pos_or_zero = and <2 x i8> %y, %not
  ret <2 x i8> %pos_or_zero
}

; extra use of shift is ok

define i8 @not_ashr_bitwidth_mask_use1(i8 %x, i8 %y) {
; CHECK-LABEL: @not_ashr_bitwidth_mask_use1(
; CHECK-NEXT:    [[SIGN:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @use8(i8 [[SIGN]])
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt i8 [[X]], 0
; CHECK-NEXT:    [[R:%.*]] = select i1 [[ISNEG]], i8 0, i8 [[Y:%.*]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sign = ashr i8 %x, 7
  call void @use8(i8 %sign)
  %not = xor i8 %sign, -1
  %r = and i8 %not, %y
  ret i8 %r
}

; extra use of xor is ok

define i8 @not_ashr_bitwidth_mask_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @not_ashr_bitwidth_mask_use2(
; CHECK-NEXT:    [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
; CHECK-NEXT:    [[NOT:%.*]] = sext i1 [[ISNOTNEG]] to i8
; CHECK-NEXT:    call void @use8(i8 [[NOT]])
; CHECK-NEXT:    [[R:%.*]] = select i1 [[ISNOTNEG]], i8 [[Y:%.*]], i8 0
; CHECK-NEXT:    ret i8 [[R]]
;
  %sign = ashr i8 %x, 7
  %not = xor i8 %sign, -1
  call void @use8(i8 %not)
  %r = and i8 %not, %y
  ret i8 %r
}

; negative test - wrong shift amount

define i8 @not_ashr_not_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @not_ashr_not_bitwidth_mask(
; CHECK-NEXT:    [[SIGN:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT:    [[NOT:%.*]] = xor i8 [[SIGN]], -1
; CHECK-NEXT:    [[R:%.*]] = and i8 [[Y:%.*]], [[NOT]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sign = ashr i8 %x, 6
  %not = xor i8 %sign, -1
  %r = and i8 %not, %y
  ret i8 %r
}

; negative test - wrong shift opcode

define i8 @not_lshr_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @not_lshr_bitwidth_mask(
; CHECK-NEXT:    [[SIGN:%.*]] = lshr i8 [[X:%.*]], 7
; CHECK-NEXT:    [[NOT:%.*]] = xor i8 [[SIGN]], -1
; CHECK-NEXT:    [[R:%.*]] = and i8 [[Y:%.*]], [[NOT]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sign = lshr i8 %x, 7
  %not = xor i8 %sign, -1
  %r = and i8 %not, %y
  ret i8 %r
}

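; Inverted sign-splat through sext: sext(~(x >> 7)) is -1 exactly when
; x >= 0, so the mask folds to 'x >= 0 ? y : 0'.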
define i16 @invert_signbit_splat_mask(i8 %x, i16 %y) {
; CHECK-LABEL: @invert_signbit_splat_mask(
; CHECK-NEXT:    [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = select i1 [[ISNOTNEG]], i16 [[Y:%.*]], i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 7
  %n = xor i8 %a, -1
  %s = sext i8 %n to i16
  %r = and i16 %s, %y
  ret i16 %r
}

define <2 x i16> @invert_signbit_splat_mask_commute(<2 x i5> %x, <2 x i16> %p) {
; CHECK-LABEL: @invert_signbit_splat_mask_commute(
; CHECK-NEXT:    [[Y:%.*]] = mul <2 x i16> [[P:%.*]], [[P]]
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt <2 x i5> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[R:%.*]] = select <2 x i1> [[ISNEG]], <2 x i16> zeroinitializer, <2 x i16> [[Y]]
; CHECK-NEXT:    ret <2 x i16> [[R]]
;
  %y = mul <2 x i16> %p, %p ; thwart complexity-based canonicalization
  %a = ashr <2 x i5> %x, <i5 4, i5 poison>
  %n = xor <2 x i5> %a, <i5 -1, i5 -1>
  %s = sext <2 x i5> %n to <2 x i16>
  %r = and <2 x i16> %y, %s
  ret <2 x i16> %r
}

define i16 @invert_signbit_splat_mask_use1(i8 %x, i16 %y) {
; CHECK-LABEL: @invert_signbit_splat_mask_use1(
; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @use8(i8 [[A]])
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt i8 [[X]], 0
; CHECK-NEXT:    [[R:%.*]] = select i1 [[ISNEG]], i16 0, i16 [[Y:%.*]]
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 7
  call void @use8(i8 %a)
  %n = xor i8 %a, -1
  %s = sext i8 %n to i16
  %r = and i16 %s, %y
  ret i16 %r
}

define i16 @invert_signbit_splat_mask_use2(i8 %x, i16 %y) {
; CHECK-LABEL: @invert_signbit_splat_mask_use2(
; CHECK-NEXT:    [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
; CHECK-NEXT:    [[N:%.*]] = sext i1 [[ISNOTNEG]] to i8
; CHECK-NEXT:    call void @use8(i8 [[N]])
; CHECK-NEXT:    [[R:%.*]] = select i1 [[ISNOTNEG]], i16 [[Y:%.*]], i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 7
  %n = xor i8 %a, -1
  call void @use8(i8 %n)
  %s = sext i8 %n to i16
  %r = and i16 %s, %y
  ret i16 %r
}

; extra use of sext is ok

define i16 @invert_signbit_splat_mask_use3(i8 %x, i16 %y) {
; CHECK-LABEL: @invert_signbit_splat_mask_use3(
; CHECK-NEXT:    [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
; CHECK-NEXT:    [[S:%.*]] = sext i1 [[ISNOTNEG]] to i16
; CHECK-NEXT:    call void @use16(i16 [[S]])
; CHECK-NEXT:    [[R:%.*]] = select i1 [[ISNOTNEG]], i16 [[Y:%.*]], i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 7
  %n = xor i8 %a, -1
  %s = sext i8 %n to i16
  call void @use16(i16 %s)
  %r = and i16 %s, %y
  ret i16 %r
}

; negative test - wrong extend

define i16 @not_invert_signbit_splat_mask1(i8 %x, i16 %y) {
; CHECK-LABEL: @not_invert_signbit_splat_mask1(
; CHECK-NEXT:    [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
; CHECK-NEXT:    [[N:%.*]] = sext i1 [[ISNOTNEG]] to i8
; CHECK-NEXT:    [[Z:%.*]] = zext i8 [[N]] to i16
; CHECK-NEXT:    [[R:%.*]] = and i16 [[Y:%.*]], [[Z]]
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 7
  %n = xor i8 %a, -1
  %z = zext i8 %n to i16
  %r = and i16 %z, %y
  ret i16 %r
}

; negative test - wrong shift amount

define i16 @not_invert_signbit_splat_mask2(i8 %x, i16 %y) {
; CHECK-LABEL: @not_invert_signbit_splat_mask2(
; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT:    [[N:%.*]] = xor i8 [[A]], -1
; CHECK-NEXT:    [[S:%.*]] = sext i8 [[N]] to i16
; CHECK-NEXT:    [[R:%.*]] = and i16 [[Y:%.*]], [[S]]
; CHECK-NEXT:    ret i16 [[R]]
;
  %a = ashr i8 %x, 6
  %n = xor i8 %a, -1
  %s = sext i8 %n to i16
  %r = and i16 %s, %y
  ret i16 %r
}

; CTTZ(ShlC) < LShrC
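; e.g. in @shl_lshr_pow2_const_case1, ShlC = 4 puts the bit at position x+2,
; lshr 6 moves it to x-4, and AndC = 8 demands bit 3, so the result is 8
; exactly when x == 7.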

define i16 @shl_lshr_pow2_const_case1(i16 %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_case1(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i16 8, i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %shl = shl i16 4, %x
  %lshr = lshr i16 %shl, 6
  %r = and i16 %lshr, 8
  ret i16 %r
}

define i16 @shl_ashr_pow2_const_case1(i16 %x) {
; CHECK-LABEL: @shl_ashr_pow2_const_case1(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i16 8, i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %shl = shl i16 4, %x
  %ashr = ashr i16 %shl, 6
  %r = and i16 %ashr, 8
  ret i16 %r
}

define <3 x i16> @shl_lshr_pow2_const_case1_uniform_vec(<3 x i16> %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_case1_uniform_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <3 x i16> [[X:%.*]], splat (i16 7)
; CHECK-NEXT:    [[R:%.*]] = select <3 x i1> [[TMP1]], <3 x i16> splat (i16 8), <3 x i16> zeroinitializer
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %shl = shl <3 x i16> <i16 4, i16 4, i16 4>, %x
  %lshr = lshr <3 x i16> %shl, <i16 6, i16 6, i16 6>
  %r = and <3 x i16> %lshr, <i16 8, i16 8, i16 8>
  ret <3 x i16> %r
}

define <3 x i16> @shl_lshr_pow2_const_case1_non_uniform_vec(<3 x i16> %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_case1_non_uniform_vec(
; CHECK-NEXT:    [[SHL:%.*]] = shl <3 x i16> <i16 2, i16 8, i16 32>, [[X:%.*]]
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <3 x i16> [[SHL]], <i16 5, i16 6, i16 3>
; CHECK-NEXT:    [[R:%.*]] = and <3 x i16> [[LSHR]], <i16 8, i16 4, i16 8>
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %shl = shl <3 x i16> <i16 2, i16 8, i16 32>, %x
  %lshr = lshr <3 x i16> %shl, <i16 5, i16 6, i16 3>
  %r = and <3 x i16> %lshr, <i16 8, i16 4, i16 8>
  ret <3 x i16> %r
}

define <3 x i16> @shl_lshr_pow2_const_case1_non_uniform_vec_negative(<3 x i16> %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_case1_non_uniform_vec_negative(
; CHECK-NEXT:    [[SHL:%.*]] = shl <3 x i16> <i16 2, i16 8, i16 32>, [[X:%.*]]
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <3 x i16> [[SHL]], <i16 5, i16 6, i16 3>
; CHECK-NEXT:    [[R:%.*]] = and <3 x i16> [[LSHR]], <i16 8, i16 4, i16 16384>
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %shl = shl <3 x i16> <i16 2, i16 8, i16 32>, %x
  %lshr = lshr <3 x i16> %shl, <i16 5, i16 6, i16 3>
  %r = and <3 x i16> %lshr, <i16 8, i16 4, i16 16384>
  ret <3 x i16> %r
}

define <3 x i16> @shl_lshr_pow2_const_case1_poison1_vec(<3 x i16> %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_case1_poison1_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <3 x i16> [[X:%.*]], <i16 8, i16 4, i16 4>
; CHECK-NEXT:    [[R:%.*]] = select <3 x i1> [[TMP1]], <3 x i16> splat (i16 8), <3 x i16> zeroinitializer
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %shl = shl <3 x i16> <i16 poison, i16 16, i16 16>, %x
  %lshr = lshr <3 x i16> %shl, <i16 5, i16 5, i16 5>
  %r = and <3 x i16> %lshr, <i16 8, i16 8, i16 8>
  ret <3 x i16> %r
}

define <3 x i16> @shl_lshr_pow2_const_case1_poison2_vec(<3 x i16> %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_case1_poison2_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <3 x i16> [[X:%.*]], <i16 poison, i16 4, i16 4>
; CHECK-NEXT:    [[R:%.*]] = select <3 x i1> [[TMP1]], <3 x i16> splat (i16 8), <3 x i16> zeroinitializer
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %shl = shl <3 x i16> <i16 16, i16 16, i16 16>, %x
  %lshr = lshr <3 x i16> %shl, <i16 poison, i16 5, i16 5>
  %r = and <3 x i16> %lshr, <i16 8, i16 8, i16 8>
  ret <3 x i16> %r
}

define <3 x i16> @shl_lshr_pow2_const_case1_poison3_vec(<3 x i16> %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_case1_poison3_vec(
; CHECK-NEXT:    [[SHL:%.*]] = shl <3 x i16> splat (i16 16), [[X:%.*]]
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <3 x i16> [[SHL]], splat (i16 5)
; CHECK-NEXT:    [[R:%.*]] = and <3 x i16> [[LSHR]], <i16 poison, i16 8, i16 8>
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %shl = shl <3 x i16> <i16 16, i16 16, i16 16>, %x
  %lshr = lshr <3 x i16> %shl, <i16 5, i16 5, i16 5>
  %r = and <3 x i16> %lshr, <i16 poison, i16 8, i16 8>
  ret <3 x i16> %r
}

; LShrC < CTTZ(ShlC) < LShrC + CTTZ(AndC)
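; e.g. in @shl_lshr_pow2_const_case2, ShlC = 16 puts the bit at position x+4,
; lshr 3 moves it to x+1, and AndC = 8 demands bit 3, so the result is 8
; exactly when x == 2.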

define i16 @shl_lshr_pow2_const_case2(i16 %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_case2(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 2
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i16 8, i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %shl = shl i16 16, %x
  %lshr = lshr i16 %shl, 3
  %r = and i16 %lshr, 8
  ret i16 %r
}

define i16 @shl_lshr_pow2_not_const_case2(i16 %x) {
; CHECK-LABEL: @shl_lshr_pow2_not_const_case2(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 2
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i16 0, i16 8
; CHECK-NEXT:    ret i16 [[R]]
;
  %shl = shl i16 16, %x
  %lshr = lshr i16 %shl, 3
  %and = and i16 %lshr, 8
  %r   = xor i16 %and, 8
  ret i16 %r
}

; CTTZ(ShlC) > LShrC + CTTZ(AndC)
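; e.g. ShlC = 4096 puts the bit at position x+12, lshr 6 leaves it at x+6,
; which is always above bit 3 (AndC = 8), so the expression folds to 0.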

define i16 @shl_lshr_pow2_const_negative_overflow1(i16 %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_negative_overflow1(
; CHECK-NEXT:    ret i16 0
;
  %shl = shl i16 4096, %x
  %lshr = lshr i16 %shl, 6
  %r = and i16 %lshr, 8
  ret i16 %r
}

; LShrC + CTTZ(AndC) > BitWidth
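; e.g. ShlC = 8 puts the bit at position x+3, lshr 6 moves it to x-3, and
; AndC = 32768 demands bit 15; that would need x == 18, which no i16 shift
; can produce, so the expression folds to 0.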

define i16 @shl_lshr_pow2_const_negative_overflow2(i16 %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_negative_overflow2(
; CHECK-NEXT:    ret i16 0
;
  %shl = shl i16 8, %x
  %lshr = lshr i16 %shl, 6
  %r = and i16 %lshr, 32768
  ret i16 %r
}

define i16 @shl_lshr_pow2_const_negative_oneuse(i16 %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_negative_oneuse(
; CHECK-NEXT:    [[SHL:%.*]] = shl i16 4, [[X:%.*]]
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i16 [[SHL]], 6
; CHECK-NEXT:    call void @use16(i16 [[LSHR]])
; CHECK-NEXT:    [[R:%.*]] = and i16 [[LSHR]], 8
; CHECK-NEXT:    ret i16 [[R]]
;
  %shl = shl i16 4, %x
  %lshr = lshr i16 %shl, 6
  call void @use16(i16 %lshr)
  %r = and i16 %lshr, 8
  ret i16 %r
}

define i16 @shl_lshr_pow2_const_negative_nopow2_1(i16 %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_negative_nopow2_1(
; CHECK-NEXT:    [[SHL:%.*]] = shl i16 3, [[X:%.*]]
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i16 [[SHL]], 6
; CHECK-NEXT:    [[R:%.*]] = and i16 [[LSHR]], 8
; CHECK-NEXT:    ret i16 [[R]]
;
  %shl = shl i16 3, %x
  %lshr = lshr i16 %shl, 6
  %r = and i16 %lshr, 8
  ret i16 %r
}

define i16 @shl_lshr_pow2_const_negative_nopow2_2(i16 %x) {
; CHECK-LABEL: @shl_lshr_pow2_const_negative_nopow2_2(
; CHECK-NEXT:    [[SHL:%.*]] = shl i16 3, [[X:%.*]]
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i16 [[SHL]], 6
; CHECK-NEXT:    [[R:%.*]] = and i16 [[LSHR]], 7
; CHECK-NEXT:    ret i16 [[R]]
;
  %shl = shl i16 3, %x
  %lshr = lshr i16 %shl, 6
  %r = and i16 %lshr, 7
  ret i16 %r
}

define i16 @lshr_lshr_pow2_const(i16 %x) {
; CHECK-LABEL: @lshr_lshr_pow2_const(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 3
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i16 4, i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %lshr1 = lshr i16 2048, %x
  %lshr2 = lshr i16 %lshr1, 6
  %r = and i16 %lshr2, 4
  ret i16 %r
}

define i16 @lshr_lshr_pow2_const_negative_oneuse(i16 %x) {
; CHECK-LABEL: @lshr_lshr_pow2_const_negative_oneuse(
; CHECK-NEXT:    [[LSHR2:%.*]] = lshr i16 32, [[X:%.*]]
; CHECK-NEXT:    call void @use16(i16 [[LSHR2]])
; CHECK-NEXT:    [[R:%.*]] = and i16 [[LSHR2]], 4
; CHECK-NEXT:    ret i16 [[R]]
;
  %lshr1 = lshr i16 2048, %x
  %lshr2 = lshr i16 %lshr1, 6
  call void @use16(i16 %lshr2)
  %r = and i16 %lshr2, 4
  ret i16 %r
}

define i16 @lshr_lshr_pow2_const_negative_nopow2_1(i16 %x) {
; CHECK-LABEL: @lshr_lshr_pow2_const_negative_nopow2_1(
; CHECK-NEXT:    [[LSHR2:%.*]] = lshr i16 31, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = and i16 [[LSHR2]], 4
; CHECK-NEXT:    ret i16 [[R]]
;
  %lshr1 = lshr i16 2047, %x
  %lshr2 = lshr i16 %lshr1, 6
  %r = and i16 %lshr2, 4
  ret i16 %r
}

define i16 @lshr_lshr_pow2_const_negative_nopow2_2(i16 %x) {
; CHECK-LABEL: @lshr_lshr_pow2_const_negative_nopow2_2(
; CHECK-NEXT:    [[LSHR2:%.*]] = lshr i16 128, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = and i16 [[LSHR2]], 3
; CHECK-NEXT:    ret i16 [[R]]
;
  %lshr1 = lshr i16 8192, %x
  %lshr2 = lshr i16 %lshr1, 6
  %r = and i16 %lshr2, 3
  ret i16 %r
}

define i16 @lshr_lshr_pow2_const_negative_overflow(i16 %x) {
; CHECK-LABEL: @lshr_lshr_pow2_const_negative_overflow(
; CHECK-NEXT:    ret i16 0
;
  %lshr1 = lshr i16 32768, %x
  %lshr2 = lshr i16 %lshr1, 15
  %r = and i16 %lshr2, 4
  ret i16 %r
}

; demanded bits path for lshr+shl+and
; Log2(LshrC) + ShlC < BitWidth
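; e.g. in @lshr_shl_pow2_const_case1, LshrC = 256 puts the bit at position
; 8-x, shl 2 moves it to 10-x, and AndC = 8 demands bit 3, so the result is 8
; exactly when x == 7.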

define i16 @lshr_shl_pow2_const_case1(i16 %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_case1(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i16 8, i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %lshr1 = lshr i16 256, %x
  %shl = shl i16 %lshr1, 2
  %r = and i16 %shl, 8
  ret i16 %r
}

define i16 @lshr_shl_pow2_const_xor(i16 %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i16 0, i16 8
; CHECK-NEXT:    ret i16 [[R]]
;
  %lshr1 = lshr i16 256, %x
  %shl = shl i16 %lshr1, 2
  %and = and i16 %shl, 8
  %r = xor i16 %and, 8
  ret i16 %r
}

; Log2(LshrC) + ShlC >= BitWidth
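; e.g. in @lshr_shl_pow2_const_case2, LshrC = 8192 puts the bit at position
; 13-x, shl 4 moves it to 17-x, and AndC = 32 demands bit 5, so the result is
; 32 exactly when x == 12.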

define i16 @lshr_shl_pow2_const_case2(i16 %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_case2(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 12
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i16 32, i16 0
; CHECK-NEXT:    ret i16 [[R]]
;
  %lshr1 = lshr i16 8192, %x
  %shl = shl i16 %lshr1, 4
  %r = and i16 %shl, 32
  ret i16 %r
}

; ShlC > Log2(AndC)
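; e.g. shl 6 clears the low 6 bits of the result, so AndC = 32 (bit 5) can
; never be set and the expression folds to 0.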

define i16 @lshr_shl_pow2_const_overflow(i16 %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_overflow(
; CHECK-NEXT:    ret i16 0
;
  %lshr1 = lshr i16 8192, %x
  %shl = shl i16 %lshr1, 6
  %r = and i16 %shl, 32
  ret i16 %r
}

define i16 @lshr_shl_pow2_const_negative_oneuse(i16 %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_negative_oneuse(
; CHECK-NEXT:    [[LSHR1:%.*]] = lshr i16 8192, [[X:%.*]]
; CHECK-NEXT:    [[SHL:%.*]] = shl i16 [[LSHR1]], 4
; CHECK-NEXT:    call void @use16(i16 [[SHL]])
; CHECK-NEXT:    [[R:%.*]] = and i16 [[SHL]], 32
; CHECK-NEXT:    ret i16 [[R]]
;
  %lshr1 = lshr i16 8192, %x
  %shl = shl i16 %lshr1, 4
  call void @use16(i16 %shl)
  %r = and i16 %shl, 32
  ret i16 %r
}

define <3 x i16> @lshr_shl_pow2_const_case1_uniform_vec(<3 x i16> %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_case1_uniform_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <3 x i16> [[X:%.*]], splat (i16 12)
; CHECK-NEXT:    [[R:%.*]] = select <3 x i1> [[TMP1]], <3 x i16> splat (i16 128), <3 x i16> zeroinitializer
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %lshr = lshr <3 x i16> <i16 8192, i16 8192, i16 8192>, %x
  %shl = shl <3 x i16> %lshr, <i16 6, i16 6, i16 6>
  %r = and <3 x i16> %shl, <i16 128, i16 128, i16 128>
  ret <3 x i16> %r
}

define <3 x i16> @lshr_shl_pow2_const_case1_non_uniform_vec(<3 x i16> %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_case1_non_uniform_vec(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <3 x i16> <i16 8192, i16 16384, i16 -32768>, [[X:%.*]]
; CHECK-NEXT:    [[SHL:%.*]] = shl <3 x i16> [[LSHR]], <i16 7, i16 5, i16 3>
; CHECK-NEXT:    [[R:%.*]] = and <3 x i16> [[SHL]], <i16 128, i16 256, i16 512>
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %lshr = lshr <3 x i16> <i16 8192, i16 16384, i16 32768>, %x
  %shl = shl <3 x i16> %lshr, <i16 7, i16 5, i16 3>
  %r = and <3 x i16> %shl, <i16 128, i16 256, i16 512>
  ret <3 x i16> %r
}

define <3 x i16> @lshr_shl_pow2_const_case1_non_uniform_vec_negative(<3 x i16> %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_case1_non_uniform_vec_negative(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <3 x i16> <i16 8192, i16 16384, i16 -32768>, [[X:%.*]]
; CHECK-NEXT:    [[SHL:%.*]] = shl <3 x i16> [[LSHR]], <i16 8, i16 5, i16 3>
; CHECK-NEXT:    [[R:%.*]] = and <3 x i16> [[SHL]], <i16 128, i16 256, i16 512>
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %lshr = lshr <3 x i16> <i16 8192, i16 16384, i16 32768>, %x
  %shl = shl <3 x i16> %lshr, <i16 8, i16 5, i16 3>
  %r = and <3 x i16> %shl, <i16 128, i16 256, i16 512>
  ret <3 x i16> %r
}

define <3 x i16> @lshr_shl_pow2_const_case1_poison1_vec(<3 x i16> %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_case1_poison1_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <3 x i16> [[X:%.*]], <i16 -1, i16 12, i16 12>
; CHECK-NEXT:    [[R:%.*]] = select <3 x i1> [[TMP1]], <3 x i16> splat (i16 128), <3 x i16> zeroinitializer
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %lshr = lshr <3 x i16> <i16 poison, i16 8192, i16 8192>, %x
  %shl = shl <3 x i16> %lshr, <i16 6, i16 6, i16 6>
  %r = and <3 x i16> %shl, <i16 128, i16 128, i16 128>
  ret <3 x i16> %r
}

define <3 x i16> @lshr_shl_pow2_const_case1_poison2_vec(<3 x i16> %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_case1_poison2_vec(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <3 x i16> splat (i16 8192), [[X:%.*]]
; CHECK-NEXT:    [[SHL:%.*]] = shl <3 x i16> [[LSHR]], <i16 poison, i16 6, i16 6>
; CHECK-NEXT:    [[R:%.*]] = and <3 x i16> [[SHL]], splat (i16 128)
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %lshr = lshr <3 x i16> <i16 8192, i16 8192, i16 8192>, %x
  %shl = shl <3 x i16> %lshr, <i16 poison, i16 6, i16 6>
  %r = and <3 x i16> %shl, <i16 128, i16 128, i16 128>
  ret <3 x i16> %r
}

define <3 x i16> @lshr_shl_pow2_const_case1_poison3_vec(<3 x i16> %x) {
; CHECK-LABEL: @lshr_shl_pow2_const_case1_poison3_vec(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <3 x i16> splat (i16 8192), [[X:%.*]]
; CHECK-NEXT:    [[SHL:%.*]] = shl <3 x i16> [[LSHR]], splat (i16 6)
; CHECK-NEXT:    [[R:%.*]] = and <3 x i16> [[SHL]], <i16 poison, i16 128, i16 128>
; CHECK-NEXT:    ret <3 x i16> [[R]]
;
  %lshr = lshr <3 x i16> <i16 8192, i16 8192, i16 8192>, %x
  %shl = shl <3 x i16> %lshr, <i16 6, i16 6, i16 6>
  %r = and <3 x i16> %shl, <i16 poison, i16 128, i16 128>
  ret <3 x i16> %r
}

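; Negated low-bit mask: %a is 0 or 1, so '0 - %a' is 0 or -1 and the 'and'
; becomes '(x & 1) == 0 ? 0 : y'.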
define i8 @negate_lowbitmask(i8 %x, i8 %y) {
; CHECK-LABEL: @negate_lowbitmask(
; CHECK-NEXT:    [[A:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8 [[A]], 0
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i8 0, i8 [[Y:%.*]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %a = and i8 %x, 1
  %n = sub i8 0, %a
  %r = and i8 %n, %y
  ret i8 %r
}

define <2 x i5> @negate_lowbitmask_commute(<2 x i5> %x, <2 x i5> %p) {
; CHECK-LABEL: @negate_lowbitmask_commute(
; CHECK-NEXT:    [[Y:%.*]] = mul <2 x i5> [[P:%.*]], [[P]]
; CHECK-NEXT:    [[A:%.*]] = and <2 x i5> [[X:%.*]], <i5 1, i5 poison>
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <2 x i5> [[A]], <i5 poison, i5 0>
; CHECK-NEXT:    [[R:%.*]] = select <2 x i1> [[TMP1]], <2 x i5> zeroinitializer, <2 x i5> [[Y]]
; CHECK-NEXT:    ret <2 x i5> [[R]]
;
  %y = mul <2 x i5> %p, %p ; thwart complexity-based canonicalization
  %a = and <2 x i5> %x, <i5 1, i5 poison>
  %n = sub <2 x i5> <i5 poison, i5 0>, %a
  %r = and <2 x i5> %y, %n
  ret <2 x i5> %r
}

define i8 @negate_lowbitmask_use1(i8 %x, i8 %y) {
; CHECK-LABEL: @negate_lowbitmask_use1(
; CHECK-NEXT:    [[A:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT:    call void @use8(i8 [[A]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8 [[A]], 0
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i8 0, i8 [[Y:%.*]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %a = and i8 %x, 1
  call void @use8(i8 %a)
  %n = sub i8 0, %a
  %r = and i8 %n, %y
  ret i8 %r
}

; negative test

define i8 @negate_lowbitmask_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @negate_lowbitmask_use2(
; CHECK-NEXT:    [[A:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT:    [[N:%.*]] = sub nsw i8 0, [[A]]
; CHECK-NEXT:    call void @use8(i8 [[N]])
; CHECK-NEXT:    [[R:%.*]] = and i8 [[Y:%.*]], [[N]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %a = and i8 %x, 1
  %n = sub i8 0, %a
  call void @use8(i8 %n)
  %r = and i8 %n, %y
  ret i8 %r
}

@g = external global i64

define i64 @test_and_or_constexpr_infloop() {
; CHECK-LABEL: @test_and_or_constexpr_infloop(
; CHECK-NEXT:    [[AND:%.*]] = and i64 ptrtoint (ptr @g to i64), -8
; CHECK-NEXT:    [[OR:%.*]] = or disjoint i64 [[AND]], 1
; CHECK-NEXT:    ret i64 [[OR]]
;
  %and = and i64 ptrtoint (ptr @g to i64), -8
  %or = or i64 %and, 1
  ret i64 %or
}

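; 'and' with a zext'd bool is a select: the mask is 0 or 1, so only the low
; bit of %a can survive and %b chooses it.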
define i32 @and_zext(i32 %a, i1 %b) {
; CHECK-LABEL: @and_zext(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 1
; CHECK-NEXT:    [[R:%.*]] = select i1 [[B:%.*]], i32 [[TMP1]], i32 0
; CHECK-NEXT:    ret i32 [[R]]
;
  %mask = zext i1 %b to i32
  %r = and i32 %a, %mask
  ret i32 %r
}

define i32 @and_zext_commuted(i32 %a, i1 %b) {
; CHECK-LABEL: @and_zext_commuted(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 1
; CHECK-NEXT:    [[R:%.*]] = select i1 [[B:%.*]], i32 [[TMP1]], i32 0
; CHECK-NEXT:    ret i32 [[R]]
;
  %mask = zext i1 %b to i32
  %r = and i32 %mask, %a
  ret i32 %r
}

define i32 @and_zext_multiuse(i32 %a, i1 %b) {
; CHECK-LABEL: @and_zext_multiuse(
; CHECK-NEXT:    [[MASK:%.*]] = zext i1 [[B:%.*]] to i32
; CHECK-NEXT:    call void @use32(i32 [[MASK]])
; CHECK-NEXT:    [[R:%.*]] = and i32 [[A:%.*]], [[MASK]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %mask = zext i1 %b to i32
  call void @use32(i32 %mask)
  %r = and i32 %a, %mask
  ret i32 %r
}

define <2 x i32> @and_zext_vec(<2 x i32> %a, <2 x i1> %b) {
; CHECK-LABEL: @and_zext_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], splat (i32 1)
; CHECK-NEXT:    [[R:%.*]] = select <2 x i1> [[B:%.*]], <2 x i32> [[TMP1]], <2 x i32> zeroinitializer
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %mask = zext <2 x i1> %b to <2 x i32>
  %r = and <2 x i32> %a, %mask
  ret <2 x i32> %r
}

; tests from PR66606
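; With an even compared constant, %a and the zext can never both have the low
; bit set (a == 2 gives 2 & 1 == 0), so the 'and' is always 0; with an odd
; constant only the zext survives.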
define i32 @and_zext_eq_even(i32 %a) {
; CHECK-LABEL: @and_zext_eq_even(
; CHECK-NEXT:    ret i32 0
;
  %cond = icmp eq i32 %a, 2
  %not = zext i1 %cond to i32
  %r = and i32 %a, %not
  ret i32 %r
}

define i32 @and_zext_eq_even_commuted(i32 %a) {
; CHECK-LABEL: @and_zext_eq_even_commuted(
; CHECK-NEXT:    ret i32 0
;
  %cond = icmp eq i32 %a, 2
  %not = zext i1 %cond to i32
  %r = and i32 %not, %a
  ret i32 %r
}

define i32 @and_zext_eq_odd(i32 %a) {
; CHECK-LABEL: @and_zext_eq_odd(
; CHECK-NEXT:    [[COND:%.*]] = icmp eq i32 [[A:%.*]], 3
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[COND]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %cond = icmp eq i32 %a, 3
  %not = zext i1 %cond to i32
  %r = and i32 %a, %not
  ret i32 %r
}

define i32 @and_zext_eq_odd_commuted(i32 %a) {
; CHECK-LABEL: @and_zext_eq_odd_commuted(
; CHECK-NEXT:    [[COND:%.*]] = icmp eq i32 [[A:%.*]], 3
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[COND]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %cond = icmp eq i32 %a, 3
  %not = zext i1 %cond to i32
  %r = and i32 %not, %a
  ret i32 %r
}

; Tests from PR66733
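; When %A == 0, the lshr is 0 and its 'not' is all-ones, so the 'and' keeps
; the zext; when %A != 0, the zext is already 0. Either way the result is
; zext(%A == 0).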
define i32 @and_zext_eq_zero(i32 %A, i32 %C) {
; CHECK-LABEL: @and_zext_eq_zero(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[A:%.*]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %1 = icmp eq i32 %A, 0
  %2 = zext i1 %1 to i32
  %3 = lshr i32 %A, %C
  %4 = xor i32 %3, -1
  %5 = and i32 %2, %4
  ret i32 %5
}

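; %p2 = y & -y is a power of 2 or zero, so 'x + p2' and 'x - p2' cannot carry
; or borrow into the single set bit of %p2: (x +/- p2) & p2 == ~x & p2. The
; reversed 'p2 - x' borrows from the low bits of %x, so it must not fold.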
define i32 @canonicalize_and_add_power2_or_zero(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_power2_or_zero(
; CHECK-NEXT:    [[NY:%.*]] = sub i32 0, [[Y:%.*]]
; CHECK-NEXT:    [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT:    call void @use32(i32 [[P2]])
; CHECK-NEXT:    [[X2:%.*]] = mul i32 [[X:%.*]], [[X]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X2]], -1
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[P2]], [[TMP1]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %ny = sub i32 0, %y
  %p2 = and i32 %ny, %y
  call void @use32(i32 %p2) ; keep p2

  %x2 = mul i32 %x, %x ; thwart complexity-based canonicalization
  %val = add i32 %x2, %p2
  %and = and i32 %val, %p2
  ret i32 %and
}

define i32 @canonicalize_and_sub_power2_or_zero(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_sub_power2_or_zero(
; CHECK-NEXT:    [[NY:%.*]] = sub i32 0, [[Y:%.*]]
; CHECK-NEXT:    [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT:    call void @use32(i32 [[P2]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[P2]], [[TMP1]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %ny = sub i32 0, %y
  %p2 = and i32 %ny, %y
  call void @use32(i32 %p2) ; keep p2

  %val = sub i32 %x, %p2
  %and = and i32 %val, %p2
  ret i32 %and
}

define i32 @canonicalize_and_add_power2_or_zero_commuted1(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_commuted1(
; CHECK-NEXT:    [[NY:%.*]] = sub i32 0, [[Y:%.*]]
; CHECK-NEXT:    [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT:    call void @use32(i32 [[P2]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[P2]], [[TMP1]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %ny = sub i32 0, %y
  %p2 = and i32 %ny, %y
  call void @use32(i32 %p2) ; keep p2

  %val = add i32 %p2, %x
  %and = and i32 %val, %p2
  ret i32 %and
}

define i32 @canonicalize_and_add_power2_or_zero_commuted2(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_commuted2(
; CHECK-NEXT:    [[NY:%.*]] = sub i32 0, [[Y:%.*]]
; CHECK-NEXT:    [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT:    call void @use32(i32 [[P2]])
; CHECK-NEXT:    [[X2:%.*]] = mul i32 [[X:%.*]], [[X]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X2]], -1
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[P2]], [[TMP1]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %ny = sub i32 0, %y
  %p2 = and i32 %ny, %y
  call void @use32(i32 %p2) ; keep p2

  %x2 = mul i32 %x, %x ; thwart complexity-based canonicalization
  %val = add i32 %x2, %p2
  %and = and i32 %p2, %val
  ret i32 %and
}

define i32 @canonicalize_and_add_power2_or_zero_commuted3(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_commuted3(
; CHECK-NEXT:    [[NY:%.*]] = sub i32 0, [[Y:%.*]]
; CHECK-NEXT:    [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT:    call void @use32(i32 [[P2]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[P2]], [[TMP1]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %ny = sub i32 0, %y
  %p2 = and i32 %ny, %y
  call void @use32(i32 %p2) ; keep p2

  %val = add i32 %p2, %x
  %and = and i32 %p2, %val
  ret i32 %and
}

define i32 @canonicalize_and_sub_power2_or_zero_commuted_nofold(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_sub_power2_or_zero_commuted_nofold(
; CHECK-NEXT:    [[NY:%.*]] = sub i32 0, [[Y:%.*]]
; CHECK-NEXT:    [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT:    call void @use32(i32 [[P2]])
; CHECK-NEXT:    [[VAL:%.*]] = sub i32 [[P2]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[VAL]], [[P2]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %ny = sub i32 0, %y
  %p2 = and i32 %ny, %y
  call void @use32(i32 %p2) ; keep p2

  %val = sub i32 %p2, %x
  %and = and i32 %val, %p2
  ret i32 %and
}

define i32 @canonicalize_and_add_non_power2_or_zero_nofold(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_non_power2_or_zero_nofold(
; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[VAL]], [[Y]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %val = add i32 %x, %y
  %and = and i32 %val, %y
  ret i32 %and
}

define i32 @canonicalize_and_add_power2_or_zero_multiuse_nofold(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_multiuse_nofold(
; CHECK-NEXT:    [[NY:%.*]] = sub i32 0, [[Y:%.*]]
; CHECK-NEXT:    [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT:    call void @use32(i32 [[P2]])
; CHECK-NEXT:    [[X2:%.*]] = mul i32 [[X:%.*]], [[X]]
; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[X2]], [[P2]]
; CHECK-NEXT:    call void @use32(i32 [[VAL]])
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[VAL]], [[P2]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %ny = sub i32 0, %y
  %p2 = and i32 %ny, %y
  call void @use32(i32 %p2) ; keep p2

  %x2 = mul i32 %x, %x ; thwart complexity-based canonicalization
  %val = add i32 %x2, %p2
  call void @use32(i32 %val)
  %and = and i32 %val, %p2
  ret i32 %and
}

define i32 @canonicalize_and_sub_power2_or_zero_multiuse_nofold(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_sub_power2_or_zero_multiuse_nofold(
; CHECK-NEXT:    [[NY:%.*]] = sub i32 0, [[Y:%.*]]
; CHECK-NEXT:    [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT:    call void @use32(i32 [[P2]])
; CHECK-NEXT:    [[VAL:%.*]] = sub i32 [[X:%.*]], [[P2]]
; CHECK-NEXT:    call void @use32(i32 [[VAL]])
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[VAL]], [[P2]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %ny = sub i32 0, %y
  %p2 = and i32 %ny, %y
  call void @use32(i32 %p2) ; keep p2

  %val = sub i32 %x, %p2
  call void @use32(i32 %val)
  %and = and i32 %val, %p2
  ret i32 %and
}

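; When the added constant equals the top bit of the demanded mask (16, with
; mask 24 = bits 3..4), the add can only flip that bit; any carry moves past
; the demanded range, so the 'add' becomes a 'xor'. Constants such as 8 or 24
; can carry into bit 4 instead, so those variants must not fold.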
define i32 @add_constant_equal_with_the_top_bit_of_demandedbits_pass(i32 %x) {
; CHECK-LABEL: @add_constant_equal_with_the_top_bit_of_demandedbits_pass(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 24
; CHECK-NEXT:    [[AND:%.*]] = xor i32 [[TMP1]], 16
; CHECK-NEXT:    ret i32 [[AND]]
;
  %add = add i32 %x, 16
  %and = and i32 %add, 24
  ret i32 %and
}

define <2 x i16> @add_constant_equal_with_the_top_bit_of_demandedbits_pass_vector(<2 x i16> %x) {
; CHECK-LABEL: @add_constant_equal_with_the_top_bit_of_demandedbits_pass_vector(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i16> [[X:%.*]], splat (i16 24)
; CHECK-NEXT:    [[AND:%.*]] = xor <2 x i16> [[TMP1]], splat (i16 16)
; CHECK-NEXT:    ret <2 x i16> [[AND]]
;
  %add = add <2 x i16> %x, <i16 16, i16 16>
  %and = and <2 x i16> %add, <i16 24, i16 24>
  ret <2 x i16> %and
}

define i32 @add_constant_equal_with_the_top_bit_of_demandedbits_fail1(i32 %x) {
; CHECK-LABEL: @add_constant_equal_with_the_top_bit_of_demandedbits_fail1(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 8
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 24
; CHECK-NEXT:    ret i32 [[AND]]
;
  %add = add i32 %x, 8
  %and = and i32 %add, 24
  ret i32 %and
}

define i32 @add_constant_equal_with_the_top_bit_of_demandedbits_fail2(i32 %x) {
; CHECK-LABEL: @add_constant_equal_with_the_top_bit_of_demandedbits_fail2(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 24
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 24
; CHECK-NEXT:    ret i32 [[AND]]
;
  %add = add i32 %x, 24
  %and = and i32 %add, 24
  ret i32 %and
}

define i32 @add_constant_equal_with_the_top_bit_of_demandedbits_insertpt(i32 %x, i32 %y) {
; CHECK-LABEL: @add_constant_equal_with_the_top_bit_of_demandedbits_insertpt(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], 16
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[OR]], 24
; CHECK-NEXT:    ret i32 [[AND]]
;
  %add = add i32 %x, 16
  %or = or i32 %add, %y
  %and = and i32 %or, 24
  ret i32 %and
}

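; Both 'and's share the sext'd bool mask (0 or -1), so the sum factors into a
; single select: cmp ? a + b : 0.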
define i32 @and_sext_multiuse(i32 %x, i32 %y, i32 %a, i32 %b) {
; CHECK-LABEL: @and_sext_multiuse(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[ADD:%.*]] = select i1 [[CMP]], i32 [[TMP1]], i32 0
; CHECK-NEXT:    ret i32 [[ADD]]
;
  %cmp = icmp sgt i32 %x, %y
  %sext = sext i1 %cmp to i32
  %and1 = and i32 %sext, %a
  %and2 = and i32 %sext, %b
  %add = add i32 %and1, %and2
  ret i32 %add
}
