; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
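; These tests verify that two single-bit ("one-hot") tests of the same value are
; merged into one masked compare, e.g. the first test folds
;   (icmp eq (and %k, 4), 0) | (icmp eq (and %k, 8), 0)
; into
;   icmp ne (and %k, 12), 12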

define i1 @and_consts(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @and_consts(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[K:%.*]], 12
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP1]], 12
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t1 = and i32 4, %k
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 8, %k
  %t6 = icmp eq i32 %t5, 0
  %or = or i1 %t2, %t6
  ret i1 %or
}

define i1 @and_consts_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @and_consts_logical(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[K:%.*]], 12
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP1]], 12
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t1 = and i32 4, %k
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 8, %k
  %t6 = icmp eq i32 %t5, 0
  %or = select i1 %t2, i1 true, i1 %t6
  ret i1 %or
}

define <2 x i1> @and_consts_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-LABEL: @and_consts_vector(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[K:%.*]], splat (i32 12)
; CHECK-NEXT:    [[OR:%.*]] = icmp ne <2 x i32> [[TMP1]], splat (i32 12)
; CHECK-NEXT:    ret <2 x i1> [[OR]]
;
  %t1 = and <2 x i32> <i32 4, i32 4>, %k
  %t2 = icmp eq <2 x i32> %t1, zeroinitializer
  %t5 = and <2 x i32> <i32 8, i32 8>, %k
  %t6 = icmp eq <2 x i32> %t5, zeroinitializer
  %or = or <2 x i1> %t2, %t6
  ret <2 x i1> %or
}

define i1 @foo1_and(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and(
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp eq i32 %t5, 0
  %or = or i1 %t2, %t6
  ret i1 %or
}

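; In the *_logical variants the second bit-test sits behind a short-circuiting
; select, so it may never be evaluated; the merged form always evaluates it, and
; the CHECK lines below show a freeze of the second shift being inserted to keep
; the transform poison-safe.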
define i1 @foo1_and_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_logical(
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp eq i32 %t5, 0
  %or = select i1 %t2, i1 true, i1 %t6
  ret i1 %or
}

define <2 x i1> @foo1_and_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-LABEL: @foo1_and_vector(
; CHECK-NEXT:    [[T:%.*]] = shl nuw <2 x i32> splat (i32 1), [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw <2 x i32> splat (i32 1), [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne <2 x i32> [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i1> [[OR]]
;
  %t = shl <2 x i32> <i32 1, i32 1>, %c1
  %t4 = shl <2 x i32> <i32 1, i32 1>, %c2
  %t1 = and <2 x i32> %t, %k
  %t2 = icmp eq <2 x i32> %t1, zeroinitializer
  %t5 = and <2 x i32> %t4, %k
  %t6 = icmp eq <2 x i32> %t5, zeroinitializer
  %or = or <2 x i1> %t2, %t6
  ret <2 x i1> %or
}

; Same as above, but with the operands of one of the ands commuted, but not the other.
define i1 @foo1_and_commuted(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_commuted(
; CHECK-NEXT:    [[K2:%.*]] = mul i32 [[K:%.*]], [[K]]
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K2]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %k2 = mul i32 %k, %k ; to trick the complexity sorting
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %k2, %t
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 %t4, %k2
  %t6 = icmp eq i32 %t5, 0
  %or = or i1 %t2, %t6
  ret i1 %or
}

define i1 @foo1_and_commuted_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_commuted_logical(
; CHECK-NEXT:    [[K2:%.*]] = mul i32 [[K:%.*]], [[K]]
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K2]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %k2 = mul i32 %k, %k ; to trick the complexity sorting
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %k2, %t
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 %t4, %k2
  %t6 = icmp eq i32 %t5, 0
  %or = select i1 %t2, i1 true, i1 %t6
  ret i1 %or
}

define <2 x i1> @foo1_and_commuted_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-LABEL: @foo1_and_commuted_vector(
; CHECK-NEXT:    [[K2:%.*]] = mul <2 x i32> [[K:%.*]], [[K]]
; CHECK-NEXT:    [[T:%.*]] = shl nuw <2 x i32> splat (i32 1), [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw <2 x i32> splat (i32 1), [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[K2]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne <2 x i32> [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i1> [[OR]]
;
  %k2 = mul <2 x i32> %k, %k ; to trick the complexity sorting
  %t = shl <2 x i32> <i32 1, i32 1>, %c1
  %t4 = shl <2 x i32> <i32 1, i32 1>, %c2
  %t1 = and <2 x i32> %k2, %t
  %t2 = icmp eq <2 x i32> %t1, zeroinitializer
  %t5 = and <2 x i32> %t4, %k2
  %t6 = icmp eq <2 x i32> %t5, zeroinitializer
  %or = or <2 x i1> %t2, %t6
  ret <2 x i1> %or
}

define i1 @or_consts(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @or_consts(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[K:%.*]], 12
; CHECK-NEXT:    [[OR:%.*]] = icmp eq i32 [[TMP1]], 12
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t1 = and i32 4, %k
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 8, %k
  %t6 = icmp ne i32 %t5, 0
  %or = and i1 %t2, %t6
  ret i1 %or
}

define i1 @or_consts_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @or_consts_logical(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[K:%.*]], 12
; CHECK-NEXT:    [[OR:%.*]] = icmp eq i32 [[TMP1]], 12
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t1 = and i32 4, %k
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 8, %k
  %t6 = icmp ne i32 %t5, 0
  %or = select i1 %t2, i1 %t6, i1 false
  ret i1 %or
}

define <2 x i1> @or_consts_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-LABEL: @or_consts_vector(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[K:%.*]], splat (i32 12)
; CHECK-NEXT:    [[OR:%.*]] = icmp eq <2 x i32> [[TMP1]], splat (i32 12)
; CHECK-NEXT:    ret <2 x i1> [[OR]]
;
  %t1 = and <2 x i32> <i32 4, i32 4>, %k
  %t2 = icmp ne <2 x i32> %t1, zeroinitializer
  %t5 = and <2 x i32> <i32 8, i32 8>, %k
  %t6 = icmp ne <2 x i32> %t5, zeroinitializer
  %or = and <2 x i1> %t2, %t6
  ret <2 x i1> %or
}

define i1 @foo1_or(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or(
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp ne i32 %t5, 0
  %or = and i1 %t2, %t6
  ret i1 %or
}

define i1 @foo1_or_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_logical(
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp eq i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp ne i32 %t5, 0
  %or = select i1 %t2, i1 %t6, i1 false
  ret i1 %or
}

define <2 x i1> @foo1_or_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-LABEL: @foo1_or_vector(
; CHECK-NEXT:    [[T:%.*]] = shl nuw <2 x i32> splat (i32 1), [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw <2 x i32> splat (i32 1), [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp eq <2 x i32> [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i1> [[OR]]
;
  %t = shl <2 x i32> <i32 1, i32 1>, %c1
  %t4 = shl <2 x i32> <i32 1, i32 1>, %c2
  %t1 = and <2 x i32> %t, %k
  %t2 = icmp ne <2 x i32> %t1, zeroinitializer
  %t5 = and <2 x i32> %t4, %k
  %t6 = icmp ne <2 x i32> %t5, zeroinitializer
  %or = and <2 x i1> %t2, %t6
  ret <2 x i1> %or
}

; Same as above, but with the operands of one of the ors commuted, but not the other.
define i1 @foo1_or_commuted(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_commuted(
; CHECK-NEXT:    [[K2:%.*]] = mul i32 [[K:%.*]], [[K]]
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K2]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %k2 = mul i32 %k, %k ; to trick the complexity sorting
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %k2, %t
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 %t4, %k2
  %t6 = icmp ne i32 %t5, 0
  %or = and i1 %t2, %t6
  ret i1 %or
}

define i1 @foo1_or_commuted_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_commuted_logical(
; CHECK-NEXT:    [[K2:%.*]] = mul i32 [[K:%.*]], [[K]]
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K2]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp eq i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %k2 = mul i32 %k, %k ; to trick the complexity sorting
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %k2, %t
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 %t4, %k2
  %t6 = icmp ne i32 %t5, 0
  %or = select i1 %t2, i1 %t6, i1 false
  ret i1 %or
}

define <2 x i1> @foo1_or_commuted_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-LABEL: @foo1_or_commuted_vector(
; CHECK-NEXT:    [[K2:%.*]] = mul <2 x i32> [[K:%.*]], [[K]]
; CHECK-NEXT:    [[T:%.*]] = shl nuw <2 x i32> splat (i32 1), [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl nuw <2 x i32> splat (i32 1), [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[K2]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp eq <2 x i32> [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i1> [[OR]]
;
  %k2 = mul <2 x i32> %k, %k ; to trick the complexity sorting
  %t = shl <2 x i32> <i32 1, i32 1>, %c1
  %t4 = shl <2 x i32> <i32 1, i32 1>, %c2
  %t1 = and <2 x i32> %k2, %t
  %t2 = icmp ne <2 x i32> %t1, zeroinitializer
  %t5 = and <2 x i32> %t4, %k2
  %t6 = icmp ne <2 x i32> %t5, zeroinitializer
  %or = and <2 x i1> %t2, %t6
  ret <2 x i1> %or
}

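; The same merge applies when the second mask is produced by shifting the sign
; bit right instead of shifting 1 left; it is still a single set bit, and the
; lshr is inferred to be 'exact'.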
define i1 @foo1_and_signbit_lshr(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr(
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t = shl i32 1, %c1
  %t4 = lshr i32 -2147483648, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp eq i32 %t5, 0
  %or = or i1 %t2, %t6
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_logical(
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t = shl i32 1, %c1
  %t4 = lshr i32 -2147483648, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp eq i32 %t5, 0
  %or = select i1 %t2, i1 true, i1 %t6
  ret i1 %or
}

define <2 x i1> @foo1_and_signbit_lshr_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_vector(
; CHECK-NEXT:    [[T:%.*]] = shl nuw <2 x i32> splat (i32 1), [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = lshr exact <2 x i32> splat (i32 -2147483648), [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne <2 x i32> [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i1> [[OR]]
;
  %t = shl <2 x i32> <i32 1, i32 1>, %c1
  %t4 = lshr <2 x i32> <i32 -2147483648, i32 -2147483648>, %c2
  %t1 = and <2 x i32> %t, %k
  %t2 = icmp eq <2 x i32> %t1, zeroinitializer
  %t5 = and <2 x i32> %t4, %k
  %t6 = icmp eq <2 x i32> %t5, zeroinitializer
  %or = or <2 x i1> %t2, %t6
  ret <2 x i1> %or
}

define i1 @foo1_or_signbit_lshr(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr(
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t = shl i32 1, %c1
  %t4 = lshr i32 -2147483648, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp ne i32 %t5, 0
  %or = and i1 %t2, %t6
  ret i1 %or
}

define i1 @foo1_or_signbit_lshr_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr_logical(
; CHECK-NEXT:    [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp eq i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t = shl i32 1, %c1
  %t4 = lshr i32 -2147483648, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp ne i32 %t5, 0
  %or = select i1 %t2, i1 %t6, i1 false
  ret i1 %or
}

define <2 x i1> @foo1_or_signbit_lshr_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr_vector(
; CHECK-NEXT:    [[T:%.*]] = shl nuw <2 x i32> splat (i32 1), [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = lshr exact <2 x i32> splat (i32 -2147483648), [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp eq <2 x i32> [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i1> [[OR]]
;
  %t = shl <2 x i32> <i32 1, i32 1>, %c1
  %t4 = lshr <2 x i32> <i32 -2147483648, i32 -2147483648>, %c2
  %t1 = and <2 x i32> %t, %k
  %t2 = icmp ne <2 x i32> %t1, zeroinitializer
  %t5 = and <2 x i32> %t4, %k
  %t6 = icmp ne <2 x i32> %t5, zeroinitializer
  %or = and <2 x i1> %t2, %t6
  ret <2 x i1> %or
}

; Same as last two, but shift-of-signbit replaced with 'icmp s*'
define i1 @foo1_and_signbit_lshr_without_shifting_signbit(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[T2]], i1 true, i1 [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = select i1 %t2, i1 true, i1 %t4
  ret i1 %or
}

define i1 @foo1_or_signbit_lshr_without_shifting_signbit(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr_without_shifting_signbit(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp ne i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp slt i32 [[T3]], 0
; CHECK-NEXT:    [[OR:%.*]] = and i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp ne i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp slt i32 %t3, 0
  %or = and i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_or_signbit_lshr_without_shifting_signbit_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr_without_shifting_signbit_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp ne i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp slt i32 [[T3]], 0
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[T2]], i1 [[T4]], i1 false
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp ne i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp slt i32 %t3, 0
  %or = select i1 %t2, i1 %t4, i1 false
  ret i1 %or
}

; Shift-of-signbit replaced with 'icmp s*' for both sides
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_both_sides(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_both_sides(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 [[K:%.*]], [[C1:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[T0]], [[T2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp sgt i32 [[TMP1]], -1
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 %k, %c1
  %t1 = icmp sgt i32 %t0, -1
  %t2 = shl i32 %k, %c2
  %t3 = icmp sgt i32 %t2, -1
  %or = or i1 %t1, %t3
  ret i1 %or
}

; %t2 can be poison whereas %t0 isn't; merging these two is unsafe.
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_both_sides_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_both_sides_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 [[K:%.*]], [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt i32 [[T0]], -1
; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T3:%.*]] = icmp sgt i32 [[T2]], -1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[T1]], i1 true, i1 [[T3]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 %k, %c1
  %t1 = icmp sgt i32 %t0, -1
  %t2 = shl i32 %k, %c2
  %t3 = icmp sgt i32 %t2, -1
  %or = select i1 %t1, i1 true, i1 %t3
  ret i1 %or
}

define i1 @foo1_or_signbit_lshr_without_shifting_signbit_both_sides(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr_without_shifting_signbit_both_sides(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 [[K:%.*]], [[C1:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[T0]], [[T2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp slt i32 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 %k, %c1
  %t1 = icmp slt i32 %t0, 0
  %t2 = shl i32 %k, %c2
  %t3 = icmp slt i32 %t2, 0
  %or = and i1 %t1, %t3
  ret i1 %or
}

define <2 x i1> @foo1_or_signbit_lshr_without_shifting_signbit_both_sides_splat(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr_without_shifting_signbit_both_sides_splat(
; CHECK-NEXT:    [[T0:%.*]] = shl <2 x i32> [[K:%.*]], [[C1:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = shl <2 x i32> [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[T0]], [[T2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp slt <2 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[OR]]
;
  %t0 = shl <2 x i32> %k, %c1
  %t1 = icmp slt <2 x i32> %t0, zeroinitializer
  %t2 = shl <2 x i32> %k, %c2
  %t3 = icmp slt <2 x i32> %t2, zeroinitializer
  %or = and <2 x i1> %t1, %t3
  ret <2 x i1> %or
}

; %t2 can be poison whereas %t0 isn't; merging these two is unsafe.
define i1 @foo1_or_signbit_lshr_without_shifting_signbit_both_sides_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr_without_shifting_signbit_both_sides_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 [[K:%.*]], [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = icmp slt i32 [[T0]], 0
; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T3:%.*]] = icmp slt i32 [[T2]], 0
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[T1]], i1 [[T3]], i1 false
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 %k, %c1
  %t1 = icmp slt i32 %t0, 0
  %t2 = shl i32 %k, %c2
  %t3 = icmp slt i32 %t2, 0
  %or = select i1 %t1, i1 %t3, i1 false
  ret i1 %or
}

; Extra use

; Expect to fold
define i1 @foo1_and_extra_use_shl(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_shl(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    store i32 [[T0]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  store i32 %t0, ptr %p  ; extra use of shl
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_shl_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_shl_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    store i32 [[T0]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  store i32 %t0, ptr %p  ; extra use of shl
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = select i1 %t3, i1 true, i1 %t5
  ret i1 %or
}

; Should not fold
define i1 @foo1_and_extra_use_and(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_and(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    store i32 [[T2]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  store i32 %t2, ptr %p  ; extra use of and
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_and_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_and_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    store i32 [[T2]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  store i32 %t2, ptr %p  ; extra use of and
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = select i1 %t3, i1 true, i1 %t5
  ret i1 %or
}

; Should not fold
define i1 @foo1_and_extra_use_cmp(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_cmp(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T3:%.*]] = icmp eq i32 [[T2]], 0
; CHECK-NEXT:    store i1 [[T3]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  store i1 %t3, ptr %p  ; extra use of cmp
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_cmp_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_cmp_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T3:%.*]] = icmp eq i32 [[T2]], 0
; CHECK-NEXT:    store i1 [[T3]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  store i1 %t3, ptr %p  ; extra use of cmp
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = select i1 %t3, i1 true, i1 %t5
  ret i1 %or
}

; Expect to fold
define i1 @foo1_and_extra_use_shl2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_shl2(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    store i32 [[T1]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  store i32 %t1, ptr %p  ; extra use of shl
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_shl2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_shl2_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT:    store i32 [[TMP1]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  store i32 %t1, ptr %p  ; extra use of shl
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = select i1 %t3, i1 true, i1 %t5
  ret i1 %or
}

; Should not fold
define i1 @foo1_and_extra_use_and2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_and2(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[K:%.*]]
; CHECK-NEXT:    store i32 [[T4]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  store i32 %t4, ptr %p  ; extra use of and
  %t5 = icmp eq i32 %t4, 0
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_and2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_and2_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT:    store i32 [[T4]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  store i32 %t4, ptr %p  ; extra use of and
  %t5 = icmp eq i32 %t4, 0
  %or = select i1 %t3, i1 true, i1 %t5
  ret i1 %or
}

; Should not fold
define i1 @foo1_and_extra_use_cmp2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_cmp2(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[K:%.*]]
; CHECK-NEXT:    [[T5:%.*]] = icmp eq i32 [[T4]], 0
; CHECK-NEXT:    store i1 [[T5]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K]], [[TMP1]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  store i1 %t5, ptr %p  ; extra use of cmp
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_cmp2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_cmp2_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT:    [[T5:%.*]] = icmp eq i32 [[T4]], 0
; CHECK-NEXT:    store i1 [[T5]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[K]], [[TMP2]]
; CHECK-NEXT:    [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  store i1 %t5, ptr %p  ; extra use of cmp
  %or = select i1 %t3, i1 true, i1 %t5
  ret i1 %or
}

; Shift-of-signbit replaced with 'icmp s*'
; Expect to fold
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    store i32 [[T0]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  store i32 %t0, ptr %p  ; extra use of shl
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    store i32 [[T0]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[T2]], i1 true, i1 [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  store i32 %t0, ptr %p  ; extra use of shl
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = select i1 %t2, i1 true, i1 %t4
  ret i1 %or
}

; Should not fold
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    store i32 [[T1]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  store i32 %t1, ptr %p  ; extra use of and
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    store i32 [[T1]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[T2]], i1 true, i1 [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  store i32 %t1, ptr %p  ; extra use of and
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = select i1 %t2, i1 true, i1 %t4
  ret i1 %or
}

; Should not fold
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    store i1 [[T2]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  store i1 %t2, ptr %p  ; extra use of cmp
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    store i1 [[T2]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[T2]], i1 true, i1 [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  store i1 %t2, ptr %p  ; extra use of cmp
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = select i1 %t2, i1 true, i1 %t4
  ret i1 %or
}

; Should not fold
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    store i32 [[T3]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  store i32 %t3, ptr %p  ; extra use of shl
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    store i32 [[T3]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[T2]], i1 true, i1 [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  store i32 %t3, ptr %p  ; extra use of shl
  %t4 = icmp sgt i32 %t3, -1
  %or = select i1 %t2, i1 true, i1 %t4
  ret i1 %or
}

; Should not fold
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    store i1 [[T4]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  store i1 %t4, ptr %p  ; extra use of cmp
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    store i1 [[T4]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[T2]], i1 true, i1 [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  store i1 %t4, ptr %p  ; extra use of cmp
  %or = select i1 %t2, i1 true, i1 %t4
  ret i1 %or
}

; Negative tests

; This test checks that we do not create an additional shift instruction when the fold fails.
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_not_pwr2(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_not_pwr2(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 3, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 3, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_not_pwr2_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_not_pwr2_logical(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 3, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[T2]], i1 true, i1 [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 3, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = select i1 %t2, i1 true, i1 %t4
  ret i1 %or
}

define i1 @two_types_of_bittest(i8 %x, i8 %c) {
; CHECK-LABEL: @two_types_of_bittest(
; CHECK-NEXT:    [[T0:%.*]] = shl nuw i8 1, [[C:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i8 [[T0]], -128
; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[RET]]
;
  %t0 = shl i8 1, %c
  %icmp1 = icmp slt i8 %x, 0
  %and = and i8 %x, %t0
  %icmp2 = icmp ne i8 %and, 0
  %ret = and i1 %icmp1, %icmp2
  ret i1 %ret
}