xref: /llvm-project/llvm/test/Transforms/InstCombine/zext.ll (revision 38fffa630ee80163dc65e759392ad29798905679)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -passes=instcombine -S | FileCheck %s
3
4target datalayout = "n64"
5
6declare void @use1(i1)
7declare void @use32(i32)
8declare void @use64(i64)
9declare void @use_vec(<2 x i9>)
10
11define i64 @test_sext_zext(i16 %A) {
12; CHECK-LABEL: @test_sext_zext(
13; CHECK-NEXT:    [[C2:%.*]] = zext i16 [[A:%.*]] to i64
14; CHECK-NEXT:    ret i64 [[C2]]
15;
; The inner zext leaves the i32 sign bit zero, so zext+sext folds to a
; single zext straight from i16 to i64.
16  %c1 = zext i16 %A to i32
17  %c2 = sext i32 %c1 to i64
18  ret i64 %c2
19}
20
21define <2 x i64> @test2(<2 x i1> %A) {
22; CHECK-LABEL: @test2(
23; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i1> [[A:%.*]], splat (i1 true)
24; CHECK-NEXT:    [[ZEXT:%.*]] = zext <2 x i1> [[XOR]] to <2 x i64>
25; CHECK-NEXT:    ret <2 x i64> [[ZEXT]]
26;
; Vector not-of-bool followed by zext is already canonical; only the
; constant is printed differently (as a splat).
27  %xor = xor <2 x i1> %A, <i1 true, i1 true>
28  %zext = zext <2 x i1> %xor to <2 x i64>
29  ret <2 x i64> %zext
30}
31
32define <2 x i64> @test3(<2 x i64> %A) {
33; CHECK-LABEL: @test3(
34; CHECK-NEXT:    [[ZEXT:%.*]] = and <2 x i64> [[A:%.*]], <i64 23, i64 42>
35; CHECK-NEXT:    ret <2 x i64> [[ZEXT]]
36;
; trunc+and+zext is narrowed away: a single wide 'and' with the zext'd
; mask constants remains.
37  %trunc = trunc <2 x i64> %A to <2 x i32>
38  %and = and <2 x i32> %trunc, <i32 23, i32 42>
39  %zext = zext <2 x i32> %and to <2 x i64>
40  ret <2 x i64> %zext
41}
42
43define <2 x i64> @test4(<2 x i64> %A) {
44; CHECK-LABEL: @test4(
45; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i64> [[A:%.*]], <i64 23, i64 42>
46; CHECK-NEXT:    [[ZEXT:%.*]] = xor <2 x i64> [[TMP1]], <i64 23, i64 42>
47; CHECK-NEXT:    ret <2 x i64> [[ZEXT]]
48;
; As test3, plus an xor: the whole narrow and/xor computation is redone
; directly in i64 and the trunc/zext pair disappears.
49  %trunc = trunc <2 x i64> %A to <2 x i32>
50  %and = and <2 x i32> %trunc, <i32 23, i32 42>
51  %xor = xor <2 x i32> %and, <i32 23, i32 42>
52  %zext = zext <2 x i32> %xor to <2 x i64>
53  ret <2 x i64> %zext
54}
55
56define i64 @fold_xor_zext_sandwich(i1 %a) {
57; CHECK-LABEL: @fold_xor_zext_sandwich(
58; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 [[A:%.*]], true
59; CHECK-NEXT:    [[ZEXT2:%.*]] = zext i1 [[TMP1]] to i64
60; CHECK-NEXT:    ret i64 [[ZEXT2]]
61;
; The xor-by-1 sandwiched between two zexts shrinks to an i1 'not',
; leaving a single zext to i64.
62  %zext1 = zext i1 %a to i32
63  %xor = xor i32 %zext1, 1
64  %zext2 = zext i32 %xor to i64
65  ret i64 %zext2
66}
67
68define <2 x i64> @fold_xor_zext_sandwich_vec(<2 x i1> %a) {
69; CHECK-LABEL: @fold_xor_zext_sandwich_vec(
70; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i1> [[A:%.*]], splat (i1 true)
71; CHECK-NEXT:    [[ZEXT2:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i64>
72; CHECK-NEXT:    ret <2 x i64> [[ZEXT2]]
73;
; Vector version of the zext/xor/zext sandwich fold.
74  %zext1 = zext <2 x i1> %a to <2 x i32>
75  %xor = xor <2 x i32> %zext1, <i32 1, i32 1>
76  %zext2 = zext <2 x i32> %xor to <2 x i64>
77  ret <2 x i64> %zext2
78}
79
80; Assert that zexts in and(zext(icmp), zext(icmp)) can be folded.
81
82define i8 @fold_and_zext_icmp(i64 %a, i64 %b, i64 %c) {
83; CHECK-LABEL: @fold_and_zext_icmp(
84; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
85; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
86; CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]]
87; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
88; CHECK-NEXT:    ret i8 [[TMP4]]
89;
; The 'and' is performed on the i1 icmp results, so only one zext to i8
; remains instead of two.
90  %1 = icmp sgt i64 %a, %b
91  %2 = zext i1 %1 to i8
92  %3 = icmp slt i64 %a, %c
93  %4 = zext i1 %3 to i8
94  %5 = and i8 %2, %4
95  ret i8 %5
96}
97
98; Assert that zexts in or(zext(icmp), zext(icmp)) can be folded.
99
100define i8 @fold_or_zext_icmp(i64 %a, i64 %b, i64 %c) {
101; CHECK-LABEL: @fold_or_zext_icmp(
102; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
103; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
104; CHECK-NEXT:    [[TMP3:%.*]] = or i1 [[TMP1]], [[TMP2]]
105; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
106; CHECK-NEXT:    ret i8 [[TMP4]]
107;
; Same fold as above but with 'or': logic happens on i1, single zext.
108  %1 = icmp sgt i64 %a, %b
109  %2 = zext i1 %1 to i8
110  %3 = icmp slt i64 %a, %c
111  %4 = zext i1 %3 to i8
112  %5 = or i8 %2, %4
113  ret i8 %5
114}
115
116; Assert that zexts in xor(zext(icmp), zext(icmp)) can be folded.
117
118define i8 @fold_xor_zext_icmp(i64 %a, i64 %b, i64 %c) {
119; CHECK-LABEL: @fold_xor_zext_icmp(
120; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
121; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
122; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
123; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
124; CHECK-NEXT:    ret i8 [[TMP4]]
125;
; Same fold as above but with 'xor': logic happens on i1, single zext.
126  %1 = icmp sgt i64 %a, %b
127  %2 = zext i1 %1 to i8
128  %3 = icmp slt i64 %a, %c
129  %4 = zext i1 %3 to i8
130  %5 = xor i8 %2, %4
131  ret i8 %5
132}
133
134; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded across
135; nested logical operators.
136
137define i8 @fold_nested_logic_zext_icmp(i64 %a, i64 %b, i64 %c, i64 %d) {
138; CHECK-LABEL: @fold_nested_logic_zext_icmp(
139; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
140; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
141; CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]]
142; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[A]], [[D:%.*]]
143; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
144; CHECK-NEXT:    [[TMP6:%.*]] = zext i1 [[TMP5]] to i8
145; CHECK-NEXT:    ret i8 [[TMP6]]
146;
; Nested or(and(...), ...) of zext'd icmps collapses to i1 logic with
; one final zext.
147  %1 = icmp sgt i64 %a, %b
148  %2 = zext i1 %1 to i8
149  %3 = icmp slt i64 %a, %c
150  %4 = zext i1 %3 to i8
151  %5 = and i8 %2, %4
152  %6 = icmp eq i64 %a, %d
153  %7 = zext i1 %6 to i8
154  %8 = or i8 %5, %7
155  ret i8 %8
156}
157
158; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
159
160define i1024 @sext_zext_apint1(i77 %A) {
161; CHECK-LABEL: @sext_zext_apint1(
162; CHECK-NEXT:    [[C2:%.*]] = zext i77 [[A:%.*]] to i1024
163; CHECK-NEXT:    ret i1024 [[C2]]
164;
; zext+sext folds to one zext even for >64-bit (APInt) widths.
165  %c1 = zext i77 %A to i533
166  %c2 = sext i533 %c1 to i1024
167  ret i1024 %c2
168}
169
170; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.
171
172define i47 @sext_zext_apint2(i11 %A) {
173; CHECK-LABEL: @sext_zext_apint2(
174; CHECK-NEXT:    [[C2:%.*]] = zext i11 [[A:%.*]] to i47
175; CHECK-NEXT:    ret i47 [[C2]]
176;
; zext+sext folds to one zext for odd sub-64-bit widths too.
177  %c1 = zext i11 %A to i39
178  %c2 = sext i39 %c1 to i47
179  ret i47 %c2
180}
181
182define i32 @masked_bit_set(i32 %x, i32 %y) {
183; CHECK-LABEL: @masked_bit_set(
184; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
185; CHECK-NEXT:    [[R:%.*]] = and i32 [[TMP1]], 1
186; CHECK-NEXT:    ret i32 [[R]]
187;
; Single-bit test: zext((x & (1 << y)) != 0) becomes (x >> y) & 1.
188  %sh1 = shl i32 1, %y
189  %and = and i32 %sh1, %x
190  %cmp = icmp ne i32 %and, 0
191  %r = zext i1 %cmp to i32
192  ret i32 %r
193}
194
195define <2 x i32> @masked_bit_clear(<2 x i32> %x, <2 x i32> %y) {
196; CHECK-LABEL: @masked_bit_clear(
197; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[X:%.*]], splat (i32 -1)
198; CHECK-NEXT:    [[TMP2:%.*]] = lshr <2 x i32> [[TMP1]], [[Y:%.*]]
199; CHECK-NEXT:    [[R:%.*]] = and <2 x i32> [[TMP2]], splat (i32 1)
200; CHECK-NEXT:    ret <2 x i32> [[R]]
201;
; Cleared-bit test: zext((x & (1 << y)) == 0) becomes (~x >> y) & 1.
202  %sh1 = shl <2 x i32> <i32 1, i32 1>, %y
203  %and = and <2 x i32> %sh1, %x
204  %cmp = icmp eq <2 x i32> %and, zeroinitializer
205  %r = zext <2 x i1> %cmp to <2 x i32>
206  ret <2 x i32> %r
207}
208
209define <2 x i32> @masked_bit_set_commute(<2 x i32> %px, <2 x i32> %y) {
210; CHECK-LABEL: @masked_bit_set_commute(
211; CHECK-NEXT:    [[X:%.*]] = srem <2 x i32> <i32 42, i32 3>, [[PX:%.*]]
212; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[X]], [[Y:%.*]]
213; CHECK-NEXT:    [[R:%.*]] = and <2 x i32> [[TMP1]], splat (i32 1)
214; CHECK-NEXT:    ret <2 x i32> [[R]]
215;
; Commuted 'and' operands; the srem exists only to defeat
; complexity-based operand canonicalization. Fold still fires.
216  %x = srem <2 x i32> <i32 42, i32 3>, %px ; thwart complexity-based canonicalization
217  %sh1 = shl <2 x i32> <i32 1, i32 1>, %y
218  %and = and <2 x i32> %x, %sh1
219  %cmp = icmp ne <2 x i32> %and, zeroinitializer
220  %r = zext <2 x i1> %cmp to <2 x i32>
221  ret <2 x i32> %r
222}
223
224define i32 @masked_bit_clear_commute(i32 %px, i32 %y) {
225; CHECK-LABEL: @masked_bit_clear_commute(
226; CHECK-NEXT:    [[X:%.*]] = srem i32 42, [[PX:%.*]]
227; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X]], -1
228; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], [[Y:%.*]]
229; CHECK-NEXT:    [[R:%.*]] = and i32 [[TMP2]], 1
230; CHECK-NEXT:    ret i32 [[R]]
231;
; Commuted cleared-bit variant; the srem only thwarts canonicalization.
232  %x = srem i32 42, %px ; thwart complexity-based canonicalization
233  %sh1 = shl i32 1, %y
234  %and = and i32 %x, %sh1
235  %cmp = icmp eq i32 %and, 0
236  %r = zext i1 %cmp to i32
237  ret i32 %r
238}
239
240define i32 @masked_bit_set_use1(i32 %x, i32 %y) {
241; CHECK-LABEL: @masked_bit_set_use1(
242; CHECK-NEXT:    [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
243; CHECK-NEXT:    call void @use32(i32 [[SH1]])
244; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], [[Y]]
245; CHECK-NEXT:    [[R:%.*]] = and i32 [[TMP1]], 1
246; CHECK-NEXT:    ret i32 [[R]]
247;
; An extra use of the shl does not block the fold; the shl is kept
; only to feed the call.
248  %sh1 = shl i32 1, %y
249  call void @use32(i32 %sh1)
250  %and = and i32 %sh1, %x
251  %cmp = icmp ne i32 %and, 0
252  %r = zext i1 %cmp to i32
253  ret i32 %r
254}
255
256; Negative test
257
258define i32 @masked_bit_set_use2(i32 %x, i32 %y) {
259; CHECK-LABEL: @masked_bit_set_use2(
260; CHECK-NEXT:    [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
261; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
262; CHECK-NEXT:    call void @use32(i32 [[AND]])
263; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
264; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
265; CHECK-NEXT:    ret i32 [[R]]
266;
; No fold: %and has an extra use, so the original sequence is kept.
267  %sh1 = shl i32 1, %y
268  %and = and i32 %sh1, %x
269  call void @use32(i32 %and)
270  %cmp = icmp ne i32 %and, 0
271  %r = zext i1 %cmp to i32
272  ret i32 %r
273}
274
275; Negative test
276
277define i32 @masked_bit_set_use3(i32 %x, i32 %y) {
278; CHECK-LABEL: @masked_bit_set_use3(
279; CHECK-NEXT:    [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
280; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
281; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
282; CHECK-NEXT:    call void @use1(i1 [[CMP]])
283; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
284; CHECK-NEXT:    ret i32 [[R]]
285;
; No fold: %cmp has an extra use, so the IR is unchanged.
286  %sh1 = shl i32 1, %y
287  %and = and i32 %sh1, %x
288  %cmp = icmp ne i32 %and, 0
289  call void @use1(i1 %cmp)
290  %r = zext i1 %cmp to i32
291  ret i32 %r
292}
293
294define i32 @masked_bit_clear_use1(i32 %x, i32 %y) {
295; CHECK-LABEL: @masked_bit_clear_use1(
296; CHECK-NEXT:    [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
297; CHECK-NEXT:    call void @use32(i32 [[SH1]])
298; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
299; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], [[Y]]
300; CHECK-NEXT:    [[R:%.*]] = and i32 [[TMP2]], 1
301; CHECK-NEXT:    ret i32 [[R]]
302;
; Extra use of the shl does not block the cleared-bit fold.
303  %sh1 = shl i32 1, %y
304  call void @use32(i32 %sh1)
305  %and = and i32 %sh1, %x
306  %cmp = icmp eq i32 %and, 0
307  %r = zext i1 %cmp to i32
308  ret i32 %r
309}
310
311; Negative test
312
313define i32 @masked_bit_clear_use2(i32 %x, i32 %y) {
314; CHECK-LABEL: @masked_bit_clear_use2(
315; CHECK-NEXT:    [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
316; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
317; CHECK-NEXT:    call void @use32(i32 [[AND]])
318; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
319; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
320; CHECK-NEXT:    ret i32 [[R]]
321;
; No fold: %and has an extra use; IR unchanged.
322  %sh1 = shl i32 1, %y
323  %and = and i32 %sh1, %x
324  call void @use32(i32 %and)
325  %cmp = icmp eq i32 %and, 0
326  %r = zext i1 %cmp to i32
327  ret i32 %r
328}
329
330; Negative test
331
332define i32 @masked_bit_clear_use3(i32 %x, i32 %y) {
333; CHECK-LABEL: @masked_bit_clear_use3(
334; CHECK-NEXT:    [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
335; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
336; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
337; CHECK-NEXT:    call void @use1(i1 [[CMP]])
338; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
339; CHECK-NEXT:    ret i32 [[R]]
340;
; No fold: %cmp has an extra use; IR unchanged.
341  %sh1 = shl i32 1, %y
342  %and = and i32 %sh1, %x
343  %cmp = icmp eq i32 %and, 0
344  call void @use1(i1 %cmp)
345  %r = zext i1 %cmp to i32
346  ret i32 %r
347}
348
349; Negative test
350
351define i32 @masked_bits_set(i32 %x, i32 %y) {
352; CHECK-LABEL: @masked_bits_set(
353; CHECK-NEXT:    [[SH1:%.*]] = shl i32 3, [[Y:%.*]]
354; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
355; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
356; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
357; CHECK-NEXT:    ret i32 [[R]]
358;
; No fold: the mask 3 << y covers two bits, not a single power-of-2 bit.
359  %sh1 = shl i32 3, %y
360  %and = and i32 %sh1, %x
361  %cmp = icmp ne i32 %and, 0
362  %r = zext i1 %cmp to i32
363  ret i32 %r
364}
365
366; Negative test
367
368define i32 @div_bit_set(i32 %x, i32 %y) {
369; CHECK-LABEL: @div_bit_set(
370; CHECK-NEXT:    [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
371; CHECK-NEXT:    [[AND:%.*]] = sdiv i32 [[SH1]], [[X:%.*]]
372; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
373; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
374; CHECK-NEXT:    ret i32 [[R]]
375;
; No fold: the compared value comes from an sdiv, not a masking 'and'.
376  %sh1 = shl i32 1, %y
377  %and = sdiv i32 %sh1, %x
378  %cmp = icmp ne i32 %and, 0
379  %r = zext i1 %cmp to i32
380  ret i32 %r
381}
382
383; Negative test
384
385define i32 @masked_bit_set_nonzero_cmp(i32 %x, i32 %y) {
386; CHECK-LABEL: @masked_bit_set_nonzero_cmp(
387; CHECK-NEXT:    [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
388; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
389; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 1
390; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
391; CHECK-NEXT:    ret i32 [[R]]
392;
; No fold: the compare is against 1 rather than 0.
393  %sh1 = shl i32 1, %y
394  %and = and i32 %sh1, %x
395  %cmp = icmp ne i32 %and, 1
396  %r = zext i1 %cmp to i32
397  ret i32 %r
398}
399
400; Negative test
401
402define i32 @masked_bit_wrong_pred(i32 %x, i32 %y) {
403; CHECK-LABEL: @masked_bit_wrong_pred(
404; CHECK-NEXT:    [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
405; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
406; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[AND]], 0
407; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
408; CHECK-NEXT:    ret i32 [[R]]
409;
; No fold: predicate is sgt, not an eq/ne test against 0.
410  %sh1 = shl i32 1, %y
411  %and = and i32 %sh1, %x
412  %cmp = icmp sgt i32 %and, 0
413  %r = zext i1 %cmp to i32
414  ret i32 %r
415}
416
417define i32 @zext_or_masked_bit_test(i32 %a, i32 %b, i32 %x) {
418; CHECK-LABEL: @zext_or_masked_bit_test(
419; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[B:%.*]]
420; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], [[A:%.*]]
421; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[AND]], 0
422; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[B]]
423; CHECK-NEXT:    [[OR:%.*]] = or i1 [[TOBOOL]], [[CMP]]
424; CHECK-NEXT:    [[Z:%.*]] = zext i1 [[OR]] to i32
425; CHECK-NEXT:    ret i32 [[Z]]
426;
; The bit-test result feeds an i1 'or' before the zext, so no shift
; fold happens; IR is unchanged.
427  %shl = shl i32 1, %b
428  %and = and i32 %shl, %a
429  %tobool = icmp ne i32 %and, 0
430  %cmp = icmp eq i32 %x, %b
431  %or = or i1 %tobool, %cmp
432  %z = zext i1 %or to i32
433  ret i32 %z
434}
435
436define i32 @zext_or_masked_bit_test_uses(i32 %a, i32 %b, i32 %x) {
437; CHECK-LABEL: @zext_or_masked_bit_test_uses(
438; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[B:%.*]]
439; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], [[A:%.*]]
440; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[AND]], 0
441; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[B]]
442; CHECK-NEXT:    [[OR:%.*]] = or i1 [[TOBOOL]], [[CMP]]
443; CHECK-NEXT:    call void @use1(i1 [[OR]])
444; CHECK-NEXT:    [[Z:%.*]] = zext i1 [[OR]] to i32
445; CHECK-NEXT:    ret i32 [[Z]]
446;
; Same pattern with an extra use of the i1 'or'; IR is unchanged.
447  %shl = shl i32 1, %b
448  %and = and i32 %shl, %a
449  %tobool = icmp ne i32 %and, 0
450  %cmp = icmp eq i32 %x, %b
451  %or = or i1 %tobool, %cmp
452  call void @use1(i1 %or)
453  %z = zext i1 %or to i32
454  ret i32 %z
455}
456
457define i16 @zext_masked_bit_zero_to_smaller_bitwidth(i32 %a, i32 %b) {
458; CHECK-LABEL: @zext_masked_bit_zero_to_smaller_bitwidth(
459; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
460; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], [[B:%.*]]
461; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
462; CHECK-NEXT:    [[Z:%.*]] = and i16 [[TMP3]], 1
463; CHECK-NEXT:    ret i16 [[Z]]
464;
; Destination i16 is narrower than the shift type: fold produces
; lshr of ~a, a trunc, and a mask with 1.
465  %shl = shl i32 1, %b
466  %and = and i32 %shl, %a
467  %cmp = icmp eq i32 %and, 0
468  %z = zext i1 %cmp to i16
469  ret i16 %z
470}
471
472define <4 x i16> @zext_masked_bit_zero_to_smaller_bitwidth_v4i32(<4 x i32> %a, <4 x i32> %b) {
473; CHECK-LABEL: @zext_masked_bit_zero_to_smaller_bitwidth_v4i32(
474; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i32> [[A:%.*]], splat (i32 -1)
475; CHECK-NEXT:    [[TMP2:%.*]] = lshr <4 x i32> [[TMP1]], [[B:%.*]]
476; CHECK-NEXT:    [[TMP3:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16>
477; CHECK-NEXT:    [[Z:%.*]] = and <4 x i16> [[TMP3]], splat (i16 1)
478; CHECK-NEXT:    ret <4 x i16> [[Z]]
479;
; Vector version of the narrower-destination cleared-bit fold.
480  %shl = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %b
481  %and = and <4 x i32> %shl, %a
482  %cmp = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
483  %z = zext <4 x i1> %cmp to <4 x i16>
484  ret <4 x i16> %z
485}
486
487; Negative test
488define i16 @zext_masked_bit_zero_to_smaller_bitwidth_multi_use_shl(i32 %a, i32 %b) {
489; CHECK-LABEL: @zext_masked_bit_zero_to_smaller_bitwidth_multi_use_shl(
490; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[B:%.*]]
491; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], [[A:%.*]]
492; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
493; CHECK-NEXT:    [[Z:%.*]] = zext i1 [[CMP]] to i16
494; CHECK-NEXT:    call void @use32(i32 [[SHL]])
495; CHECK-NEXT:    ret i16 [[Z]]
496;
; No fold for the eq-0 form when the shl has another use; IR unchanged.
497  %shl = shl i32 1, %b
498  %and = and i32 %shl, %a
499  %cmp = icmp eq i32 %and, 0
500  %z = zext i1 %cmp to i16
501  call void @use32(i32 %shl)
502  ret i16 %z
503}
504
505define i16 @zext_masked_bit_nonzero_to_smaller_bitwidth(i32 %a, i32 %b) {
506; CHECK-LABEL: @zext_masked_bit_nonzero_to_smaller_bitwidth(
507; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[A:%.*]], [[B:%.*]]
508; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
509; CHECK-NEXT:    [[Z:%.*]] = and i16 [[TMP2]], 1
510; CHECK-NEXT:    ret i16 [[Z]]
511;
; Set-bit (ne 0) test to a narrower type: lshr, trunc, mask with 1.
512  %shl = shl i32 1, %b
513  %and = and i32 %shl, %a
514  %cmp = icmp ne i32 %and, 0
515  %z = zext i1 %cmp to i16
516  ret i16 %z
517}
518
519define i16 @zext_masked_bit_nonzero_to_smaller_bitwidth_multi_use_shl(i32 %a, i32 %b) {
520; CHECK-LABEL: @zext_masked_bit_nonzero_to_smaller_bitwidth_multi_use_shl(
521; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[B:%.*]]
522; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[A:%.*]], [[B]]
523; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
524; CHECK-NEXT:    [[Z:%.*]] = and i16 [[TMP2]], 1
525; CHECK-NEXT:    call void @use32(i32 [[SHL]])
526; CHECK-NEXT:    ret i16 [[Z]]
527;
; The ne-0 form still folds; the shl survives only for its extra use.
528  %shl = shl i32 1, %b
529  %and = and i32 %shl, %a
530  %cmp = icmp ne i32 %and, 0
531  %z = zext i1 %cmp to i16
532  call void @use32(i32 %shl)
533  ret i16 %z
534}
535
536define i64 @zext_masked_bit_zero_to_larger_bitwidth(i32 %a, i32 %b) {
537; CHECK-LABEL: @zext_masked_bit_zero_to_larger_bitwidth(
538; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
539; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], [[B:%.*]]
540; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP2]], 1
541; CHECK-NEXT:    [[Z:%.*]] = zext nneg i32 [[TMP3]] to i64
542; CHECK-NEXT:    ret i64 [[Z]]
543;
; Destination i64 is wider: mask in i32, then a 'zext nneg' widens it.
544  %shl = shl i32 1, %b
545  %and = and i32 %shl, %a
546  %cmp = icmp eq i32 %and, 0
547  %z = zext i1 %cmp to i64
548  ret i64 %z
549}
550
551define <4 x i64> @zext_masked_bit_zero_to_larger_bitwidth_v4i32(<4 x i32> %a, <4 x i32> %b) {
552; CHECK-LABEL: @zext_masked_bit_zero_to_larger_bitwidth_v4i32(
553; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i32> [[A:%.*]], splat (i32 -1)
554; CHECK-NEXT:    [[TMP2:%.*]] = lshr <4 x i32> [[TMP1]], [[B:%.*]]
555; CHECK-NEXT:    [[TMP3:%.*]] = and <4 x i32> [[TMP2]], splat (i32 1)
556; CHECK-NEXT:    [[Z:%.*]] = zext nneg <4 x i32> [[TMP3]] to <4 x i64>
557; CHECK-NEXT:    ret <4 x i64> [[Z]]
558;
; Vector version of the wider-destination cleared-bit fold.
559  %shl = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %b
560  %and = and <4 x i32> %shl, %a
561  %cmp = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
562  %z = zext <4 x i1> %cmp to <4 x i64>
563  ret <4 x i64> %z
564}
565
566define i32 @notneg_zext_wider(i8 %x) {
567; CHECK-LABEL: @notneg_zext_wider(
568; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], -1
569; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
570; CHECK-NEXT:    ret i32 [[R]]
571;
; zext of 'is non-negative' to a wider type: already canonical, kept.
572  %cmp = icmp sgt i8 %x, -1
573  %r = zext i1 %cmp to i32
574  ret i32 %r
575}
576
577define <2 x i8> @notneg_zext_narrower(<2 x i32> %x) {
578; CHECK-LABEL: @notneg_zext_narrower(
579; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i32> [[X:%.*]], splat (i32 -1)
580; CHECK-NEXT:    [[R:%.*]] = zext <2 x i1> [[CMP]] to <2 x i8>
581; CHECK-NEXT:    ret <2 x i8> [[R]]
582;
; Vector 'is non-negative' zext to a narrower element type: kept as-is.
583  %cmp = icmp sgt <2 x i32> %x, <i32 -1, i32 -1>
584  %r = zext <2 x i1> %cmp to <2 x i8>
585  ret <2 x i8> %r
586}
587
588define i32 @notneg_zext_wider_use(i8 %x) {
589; CHECK-LABEL: @notneg_zext_wider_use(
590; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], -1
591; CHECK-NEXT:    call void @use1(i1 [[CMP]])
592; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
593; CHECK-NEXT:    ret i32 [[R]]
594;
; Same pattern with an extra use of the compare; IR unchanged.
595  %cmp = icmp sgt i8 %x, -1
596  call void @use1(i1 %cmp)
597  %r = zext i1 %cmp to i32
598  ret i32 %r
599}
600
601define i8 @notneg_zext_narrower_use(i32 %x) {
602; CHECK-LABEL: @notneg_zext_narrower_use(
603; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], -1
604; CHECK-NEXT:    call void @use1(i1 [[CMP]])
605; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i8
606; CHECK-NEXT:    ret i8 [[R]]
607;
; Narrower-destination variant with an extra compare use; IR unchanged.
608  %cmp = icmp sgt i32 %x, -1
609  call void @use1(i1 %cmp)
610  %r = zext i1 %cmp to i8
611  ret i8 %r
612}
613
614define i8 @disguised_signbit_clear_test(i64 %x) {
615; CHECK-LABEL: @disguised_signbit_clear_test(
616; CHECK-NEXT:    [[A1:%.*]] = and i64 [[X:%.*]], 128
617; CHECK-NEXT:    [[T4:%.*]] = icmp eq i64 [[A1]], 0
618; CHECK-NEXT:    [[T6:%.*]] = zext i1 [[T4]] to i8
619; CHECK-NEXT:    ret i8 [[T6]]
620;
; Testing i64 bit 7 (the would-be i8 sign bit) with a constant mask:
; the and/icmp/zext sequence is kept as-is.
621  %a1 = and i64 %x, 128
622  %t4 = icmp eq i64 %a1, 0
623  %t6 = zext i1 %t4 to i8
624  ret i8 %t6
625}
626
627define i16 @pr57899(i1 %c, i32 %x) {
628; CHECK-LABEL: @pr57899(
629; CHECK-NEXT:  entry:
630; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
631; CHECK:       if:
632; CHECK-NEXT:    br label [[JOIN]]
633; CHECK:       join:
634; CHECK-NEXT:    ret i16 1
635;
; PR57899: the select is always-false so %g.2 is known to be 1; both
; compares then fold and the function simply returns 1.
636entry:
637  br i1 %c, label %if, label %join
638
639if:
640  %g.1 = select i1 false, i32 %x, i32 1
641  br label %join
642
643join:
644  %g.2 = phi i32 [ %g.1, %if ], [ 1, %entry ]
645  %tobool1 = icmp ne i32 %g.2, 4
646  %tobool3 = icmp ne i32 %g.2, 64
647  %x1 = and i1 %tobool1, %tobool3
648  %conv4 = zext i1 %x1 to i16
649  ret i16 %conv4
650}
651
652; negative test - but this could be transformed to eliminate a use of 't'
653
654define i64 @and_trunc_extra_use1(i64 %x, i32 %y) {
655; CHECK-LABEL: @and_trunc_extra_use1(
656; CHECK-NEXT:    [[T:%.*]] = trunc i64 [[X:%.*]] to i32
657; CHECK-NEXT:    call void @use32(i32 [[T]])
658; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y:%.*]], [[T]]
659; CHECK-NEXT:    [[Z:%.*]] = zext i32 [[A]] to i64
660; CHECK-NEXT:    ret i64 [[Z]]
661;
; No widening fold: %t has an extra use; only the 'and' operand order
; is canonicalized.
662  %t = trunc i64 %x to i32
663  call void @use32(i32 %t)
664  %a = and i32 %t, %y
665  %z = zext i32 %a to i64
666  ret i64 %z
667}
668
669; negative test - but this could be transformed to eliminate a use of 't'
670
671define i64 @and_trunc_extra_use1_commute(i64 %x, i32 %p) {
672; CHECK-LABEL: @and_trunc_extra_use1_commute(
673; CHECK-NEXT:    [[Y:%.*]] = mul i32 [[P:%.*]], [[P]]
674; CHECK-NEXT:    [[T:%.*]] = trunc i64 [[X:%.*]] to i32
675; CHECK-NEXT:    call void @use32(i32 [[T]])
676; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], [[T]]
677; CHECK-NEXT:    [[Z:%.*]] = zext i32 [[A]] to i64
678; CHECK-NEXT:    ret i64 [[Z]]
679;
; Commuted variant of the case above; the mul only thwarts
; complexity-based canonicalization. No fold.
680  %y = mul i32 %p, %p ; thwart complexity-based canonicalization
681  %t = trunc i64 %x to i32
682  call void @use32(i32 %t)
683  %a = and i32 %y, %t
684  %z = zext i32 %a to i64
685  ret i64 %z
686}
687
688; negative test - avoid creating an extra instruction
689
690define i64 @and_trunc_extra_use2(i64 %x, i32 %y) {
691; CHECK-LABEL: @and_trunc_extra_use2(
692; CHECK-NEXT:    [[T:%.*]] = trunc i64 [[X:%.*]] to i32
693; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y:%.*]], [[T]]
694; CHECK-NEXT:    call void @use32(i32 [[A]])
695; CHECK-NEXT:    [[Z:%.*]] = zext i32 [[A]] to i64
696; CHECK-NEXT:    ret i64 [[Z]]
697;
; No fold: %a has an extra use, so widening would add an instruction.
698  %t = trunc i64 %x to i32
699  %a = and i32 %t, %y
700  call void @use32(i32 %a)
701  %z = zext i32 %a to i64
702  ret i64 %z
703}
704
705; With constant mask, we duplicate it as a wider constant.
706
707define i64 @and_trunc_extra_use2_constant(i64 %x) {
708; CHECK-LABEL: @and_trunc_extra_use2_constant(
709; CHECK-NEXT:    [[T:%.*]] = trunc i64 [[X:%.*]] to i32
710; CHECK-NEXT:    [[A:%.*]] = and i32 [[T]], 42
711; CHECK-NEXT:    call void @use32(i32 [[A]])
712; CHECK-NEXT:    [[Z:%.*]] = and i64 [[X]], 42
713; CHECK-NEXT:    ret i64 [[Z]]
714;
; With a constant mask the fold proceeds despite the extra use: the
; mask is duplicated as a wide i64 constant.
715  %t = trunc i64 %x to i32
716  %a = and i32 %t, 42
717  call void @use32(i32 %a)
718  %z = zext i32 %a to i64
719  ret i64 %z
720}
721
722; Works with arbitrary vector types and verifies that the mask constant is zero-extended.
723
724define <2 x i17> @and_trunc_extra_use3_constant_vec(<2 x i17> %x) {
725; CHECK-LABEL: @and_trunc_extra_use3_constant_vec(
726; CHECK-NEXT:    [[T:%.*]] = trunc <2 x i17> [[X:%.*]] to <2 x i9>
727; CHECK-NEXT:    call void @use_vec(<2 x i9> [[T]])
728; CHECK-NEXT:    [[A:%.*]] = and <2 x i9> [[T]], <i9 42, i9 -3>
729; CHECK-NEXT:    call void @use_vec(<2 x i9> [[A]])
730; CHECK-NEXT:    [[Z:%.*]] = and <2 x i17> [[X]], <i17 42, i17 509>
731; CHECK-NEXT:    ret <2 x i17> [[Z]]
732;
; Vector fold with odd widths: the i9 mask -3 reappears zero-extended
; as i17 509 in the wide 'and'.
733  %t = trunc <2 x i17> %x to <2 x i9>
734  call void @use_vec(<2 x i9> %t)
735  %a = and <2 x i9> %t, <i9 42, i9 -3>
736  call void @use_vec(<2 x i9> %a)
737  %z = zext <2 x i9> %a to <2 x i17>
738  ret <2 x i17> %z
739}
740
741; negative test - would require another cast
742
743define i64 @and_trunc_extra_use1_wider_src(i65 %x, i32 %y) {
744; CHECK-LABEL: @and_trunc_extra_use1_wider_src(
745; CHECK-NEXT:    [[T:%.*]] = trunc i65 [[X:%.*]] to i32
746; CHECK-NEXT:    call void @use32(i32 [[T]])
747; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y:%.*]], [[T]]
748; CHECK-NEXT:    [[Z:%.*]] = zext i32 [[A]] to i64
749; CHECK-NEXT:    ret i64 [[Z]]
750;
; No fold: the source is i65, wider than the i64 destination, so the
; transform would need an additional cast.
751  %t = trunc i65 %x to i32
752  call void @use32(i32 %t)
753  %a = and i32 %t, %y
754  %z = zext i32 %a to i64
755  ret i64 %z
756}
757
758define i16 @zext_icmp_eq0_pow2(i32 %x) {
759; CHECK-LABEL: @zext_icmp_eq0_pow2(
760; CHECK-NEXT:    [[M:%.*]] = and i32 [[X:%.*]], 4
761; CHECK-NEXT:    [[I:%.*]] = icmp eq i32 [[M]], 0
762; CHECK-NEXT:    [[Z:%.*]] = zext i1 [[I]] to i16
763; CHECK-NEXT:    ret i16 [[Z]]
764;
; eq-0 test of a constant power-of-2 mask stays as and+icmp+zext.
765  %m = and i32 %x, 4
766  %i = icmp eq i32 %m, 0
767  %z = zext i1 %i to i16
768  ret i16 %z
769}
770
771define i16 @zext_icmp_eq0_pow2_use1(i32 %x) {
772; CHECK-LABEL: @zext_icmp_eq0_pow2_use1(
773; CHECK-NEXT:    [[M:%.*]] = and i32 [[X:%.*]], 4
774; CHECK-NEXT:    call void @use32(i32 [[M]])
775; CHECK-NEXT:    [[I:%.*]] = icmp eq i32 [[M]], 0
776; CHECK-NEXT:    [[Z:%.*]] = zext i1 [[I]] to i16
777; CHECK-NEXT:    ret i16 [[Z]]
778;
; Same as above with an extra use of %m; IR unchanged.
779  %m = and i32 %x, 4
780  call void @use32(i32 %m)
781  %i = icmp eq i32 %m, 0
782  %z = zext i1 %i to i16
783  ret i16 %z
784}
785
786define i16 @zext_icmp_eq0_pow2_use2(i32 %x) {
787; CHECK-LABEL: @zext_icmp_eq0_pow2_use2(
788; CHECK-NEXT:    [[M:%.*]] = and i32 [[X:%.*]], 4
789; CHECK-NEXT:    [[I:%.*]] = icmp eq i32 [[M]], 0
790; CHECK-NEXT:    call void @use1(i1 [[I]])
791; CHECK-NEXT:    [[Z:%.*]] = zext i1 [[I]] to i16
792; CHECK-NEXT:    ret i16 [[Z]]
793;
; Same as above with an extra use of the compare; IR unchanged.
794  %m = and i32 %x, 4
795  %i = icmp eq i32 %m, 0
796  call void @use1(i1 %i)
797  %z = zext i1 %i to i16
798  ret i16 %z
799}
800
801; This used to cause an infinite loop in InstCombine.
802
803define i8 @zext_icmp_eq_pow2(i8 %y, i8 %x) {
804; CHECK-LABEL: @zext_icmp_eq_pow2(
805; CHECK-NEXT:    [[SHLX:%.*]] = shl i8 [[X:%.*]], 7
806; CHECK-NEXT:    [[SHLY:%.*]] = shl i8 -128, [[Y:%.*]]
807; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[SHLX]], [[SHLY]]
808; CHECK-NEXT:    [[R:%.*]] = zext i1 [[C]] to i8
809; CHECK-NEXT:    ret i8 [[R]]
810;
; Regression test: comparing two shifted sign-bit values must be left
; alone (this pattern once sent InstCombine into an infinite loop).
811  %shlx = shl i8 %x, 7
812  %shly = shl i8 -128, %y
813  %c = icmp eq i8 %shlx, %shly
814  %r = zext i1 %c to i8
815  ret i8 %r
816}
817
818define i64 @zext_icmp_eq_bool_0(ptr %ptr) {
819; CHECK-LABEL: @zext_icmp_eq_bool_0(
820; CHECK-NEXT:    [[VAL:%.*]] = load i64, ptr [[PTR:%.*]], align 8, !range [[RNG0:![0-9]+]]
821; CHECK-NEXT:    [[LEN:%.*]] = xor i64 [[VAL]], 1
822; CHECK-NEXT:    ret i64 [[LEN]]
823;
; !range says %val is 0 or 1, so zext(val == 0) folds to val ^ 1.
824  %val = load i64, ptr %ptr, align 8, !range !{i64 0, i64 2}
825  %cmp = icmp eq i64 %val, 0
826  %len = zext i1 %cmp to i64
827  ret i64 %len
828}
829
830define i64 @zext_icmp_eq_bool_1(ptr %ptr) {
831; CHECK-LABEL: @zext_icmp_eq_bool_1(
832; CHECK-NEXT:    [[VAL:%.*]] = load i64, ptr [[PTR:%.*]], align 8, !range [[RNG0]]
833; CHECK-NEXT:    ret i64 [[VAL]]
834;
; With !range [0,2), zext(val == 1) is just %val itself.
835  %val = load i64, ptr %ptr, align 8, !range !{i64 0, i64 2}
836  %cmp = icmp eq i64 %val, 1
837  %len = zext i1 %cmp to i64
838  ret i64 %len
839}
840
841define i64 @zext_icmp_ne_bool_0(ptr %ptr) {
842; CHECK-LABEL: @zext_icmp_ne_bool_0(
843; CHECK-NEXT:    [[VAL:%.*]] = load i64, ptr [[PTR:%.*]], align 8, !range [[RNG0]]
844; CHECK-NEXT:    ret i64 [[VAL]]
845;
; With !range [0,2), zext(val != 0) is just %val itself.
846  %val = load i64, ptr %ptr, align 8, !range !{i64 0, i64 2}
847  %cmp = icmp ne i64 %val, 0
848  %len = zext i1 %cmp to i64
849  ret i64 %len
850}
851
852define i64 @zext_icmp_ne_bool_1(ptr %ptr) {
853; CHECK-LABEL: @zext_icmp_ne_bool_1(
854; CHECK-NEXT:    [[VAL:%.*]] = load i64, ptr [[PTR:%.*]], align 8, !range [[RNG0]]
855; CHECK-NEXT:    [[LEN:%.*]] = xor i64 [[VAL]], 1
856; CHECK-NEXT:    ret i64 [[LEN]]
857;
; With !range [0,2), zext(val != 1) folds to val ^ 1.
858  %val = load i64, ptr %ptr, align 8, !range !{i64 0, i64 2}
859  %cmp = icmp ne i64 %val, 1
860  %len = zext i1 %cmp to i64
861  ret i64 %len
862}
863
864; https://alive2.llvm.org/ce/z/k7qosS
865define i32  @zext_icmp_eq0_no_shift(ptr %ptr ) {
866; CHECK-LABEL: @zext_icmp_eq0_no_shift(
867; CHECK-NEXT:    [[X:%.*]] = load i8, ptr [[PTR:%.*]], align 1, !range [[RNG1:![0-9]+]]
868; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], 1
869; CHECK-NEXT:    [[RES:%.*]] = zext nneg i8 [[TMP1]] to i32
870; CHECK-NEXT:    ret i32 [[RES]]
871;
; i8 known to be in [0,2): the eq-0 compare folds to x ^ 1 and the
; widening zext gains the nneg flag.
872  %X = load i8, ptr %ptr,align 1, !range !{i8 0, i8 2} ; range [0, 2)
873  %cmp = icmp eq i8 %X, 0
874  %res = zext i1 %cmp to i32
875  ret i32 %res
876}
877
878@g = external global i8
879
880define i64 @evaluate_zexted_const_expr(i1 %c) {
881; CHECK-LABEL: @evaluate_zexted_const_expr(
882; CHECK-NEXT:    [[AND:%.*]] = select i1 [[C:%.*]], i7 trunc (i64 add (i64 ptrtoint (ptr @g to i64), i64 1) to i7), i7 trunc (i64 add (i64 ptrtoint (ptr @g to i64), i64 2) to i7)
883; CHECK-NEXT:    [[EXT:%.*]] = zext i7 [[AND]] to i64
884; CHECK-NEXT:    ret i64 [[EXT]]
885;
; zext of a select between relocatable constant expressions is not
; evaluated; the IR stays as written.
886  %and = select i1 %c, i7 trunc (i64 add (i64 ptrtoint (ptr @g to i64), i64 1) to i7), i7 trunc (i64 add (i64 ptrtoint (ptr @g to i64), i64 2) to i7)
887  %ext = zext i7 %and to i64
888  ret i64 %ext
889}
890
891define i16 @zext_nneg_flag_drop(i8 %x, i16 %y) {
892; CHECK-LABEL: @zext_nneg_flag_drop(
893; CHECK-NEXT:    [[EXT:%.*]] = zext i8 [[X:%.*]] to i16
894; CHECK-NEXT:    [[OR1:%.*]] = or i16 [[Y:%.*]], [[EXT]]
895; CHECK-NEXT:    [[OR2:%.*]] = or i16 [[OR1]], 128
896; CHECK-NEXT:    ret i16 [[OR2]]
897;
; Demanded bits remove the 'and 127' (bit 7 is set by or2 anyway);
; %x may then be negative, so the nneg flag must be dropped.
898  %and = and i8 %x, 127
899  %ext = zext nneg i8 %and to i16
900  %or1 = or i16 %ext, %y
901  %or2 = or i16 %or1, 128
902  ret i16 %or2
903}
904
905define i32 @zext_nneg_redundant_and(i8 %a) {
906; CHECK-LABEL: @zext_nneg_redundant_and(
907; CHECK-NEXT:    [[A_I32:%.*]] = zext nneg i8 [[A:%.*]] to i32
908; CHECK-NEXT:    ret i32 [[A_I32]]
909;
; nneg guarantees bit 7 of the i8 is clear, making 'and 127' redundant.
910  %a.i32 = zext nneg i8 %a to i32
911  %res = and i32 %a.i32, 127
912  ret i32 %res
913}
914
915; Negative test, the and can't be removed
916define i32 @zext_nneg_redundant_and_neg(i8 %a) {
917; CHECK-LABEL: @zext_nneg_redundant_and_neg(
918; CHECK-NEXT:    [[B:%.*]] = and i8 [[A:%.*]], 127
919; CHECK-NEXT:    [[B_I32:%.*]] = zext nneg i8 [[B]] to i32
920; CHECK-NEXT:    ret i32 [[B_I32]]
921;
; Here the narrow 'and' is what establishes the nneg fact, so it
; cannot be removed.
922  %b = and i8 %a, 127
923  %b.i32 = zext nneg i8 %b to i32
924  ret i32 %b.i32
925}
926
927define i64 @zext_nneg_signbit_extract(i32 %a) nounwind {
928; CHECK-LABEL: @zext_nneg_signbit_extract(
929; CHECK-NEXT:    ret i64 0
930;
; nneg means bit 31 of %a is zero, so shifting it out yields 0.
931  %b = zext nneg i32 %a to i64
932  %c = lshr i64 %b, 31
933  ret i64 %c
934}
935
936define i64 @zext_nneg_demanded_constant(i8 %a) nounwind {
937; CHECK-LABEL: @zext_nneg_demanded_constant(
938; CHECK-NEXT:    [[B:%.*]] = zext nneg i8 [[A:%.*]] to i64
939; CHECK-NEXT:    call void @use64(i64 [[B]]) #[[ATTR0:[0-9]+]]
940; CHECK-NEXT:    [[C:%.*]] = and i64 [[B]], 126
941; CHECK-NEXT:    [[C]] = and i64 [[B]], 126
942;
; nneg clears bit 7, so the demanded mask 254 shrinks to 126; %b is
; kept because the call also uses it.
943  %b = zext nneg i8 %a to i64
944  call void @use64(i64 %b)
945  %c = and i64 %b, 254
946  ret i64 %c
947}
948
949define i32 @zext_nneg_i1(i1 %x) {
950; CHECK-LABEL: @zext_nneg_i1(
951; CHECK-NEXT:  entry:
952; CHECK-NEXT:    ret i32 0
953;
; An i1 with nneg can only be 0 (1 is negative as i1), so this is 0.
954entry:
955  %res = zext nneg i1 %x to i32
956  ret i32 %res
957}
958
959define <2 x i32> @zext_nneg_i1_vec(<2 x i1> %x) {
960; CHECK-LABEL: @zext_nneg_i1_vec(
961; CHECK-NEXT:  entry:
962; CHECK-NEXT:    ret <2 x i32> zeroinitializer
963;
; Vector version: nneg on <2 x i1> folds to the zero vector.
964entry:
965  %res = zext nneg <2 x i1> %x to <2 x i32>
966  ret <2 x i32> %res
967}
968
969define i32 @zext_nneg_i2(i2 %x) {
970; CHECK-LABEL: @zext_nneg_i2(
971; CHECK-NEXT:  entry:
972; CHECK-NEXT:    [[RES:%.*]] = zext nneg i2 [[X:%.*]] to i32
973; CHECK-NEXT:    ret i32 [[RES]]
974;
; With i2 (or wider) the nneg flag is satisfiable, so the zext stays.
975entry:
976  %res = zext nneg i2 %x to i32
977  ret i32 %res
978}
979