; xref: /llvm-project/llvm/test/Transforms/InstCombine/icmp-binop.ll (revision c1ad064dd3b9a1bf503bb8cd6d0d0418a05d10e8)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; Helper declarations used by the tests below.
declare void @use64(i64)
declare void @llvm.assume(i1)

; (v * 3) == 0 folds to v == 0: multiplying by an odd constant preserves zeroness.
define i1 @mul_unkV_oddC_eq(i32 %v) {
; CHECK-LABEL: @mul_unkV_oddC_eq(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[V:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i32 %v, 3
  %cmp = icmp eq i32 %mul, 0
  ret i1 %cmp
}
16
; Negative test: compared against a non-zero constant (4), so the multiply is kept.
define i1 @mul_unkV_oddC_eq_nonzero(i32 %v) {
; CHECK-LABEL: @mul_unkV_oddC_eq_nonzero(
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[V:%.*]], 3
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[MUL]], 4
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i32 %v, 3
  %cmp = icmp eq i32 %mul, 4
  ret i1 %cmp
}
27
; Vector splat version of the odd-constant fold: (v * 3) != 0 folds to v != 0.
define <2 x i1> @mul_unkV_oddC_ne_vec(<2 x i64> %v) {
; CHECK-LABEL: @mul_unkV_oddC_ne_vec(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i64> [[V:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %mul = mul <2 x i64> %v, <i64 3, i64 3>
  %cmp = icmp ne <2 x i64> %mul, <i64 0, i64 0>
  ret <2 x i1> %cmp
}
37
; Both operands are assumed odd, so the product is odd and hence non-zero:
; the (mul != 0) compare folds to true.
define i1 @mul_assumeoddV_asumeoddV_eq(i16 %v, i16 %v2) {
; CHECK-LABEL: @mul_assumeoddV_asumeoddV_eq(
; CHECK-NEXT:    [[LB:%.*]] = and i16 [[V:%.*]], 1
; CHECK-NEXT:    [[ODD:%.*]] = icmp ne i16 [[LB]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[ODD]])
; CHECK-NEXT:    [[LB2:%.*]] = and i16 [[V2:%.*]], 1
; CHECK-NEXT:    [[ODD2:%.*]] = icmp ne i16 [[LB2]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[ODD2]])
; CHECK-NEXT:    ret i1 true
;
  %lb = and i16 %v, 1
  %odd = icmp ne i16 %lb, 0
  call void @llvm.assume(i1 %odd)
  %lb2 = and i16 %v2, 1
  %odd2 = icmp ne i16 %lb2, 0
  call void @llvm.assume(i1 %odd2)
  %mul = mul i16 %v, %v2
  %cmp = icmp ne i16 %mul, 0
  ret i1 %cmp
}
58
; Negative test for the zero-compare fold: only the sge 0 -> sgt -1
; canonicalization happens; the multiply remains.
define i1 @mul_unkV_oddC_sge(i8 %v) {
; CHECK-LABEL: @mul_unkV_oddC_sge(
; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[V:%.*]], 3
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[MUL]], -1
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i8 %v, 3
  %cmp = icmp sge i8 %mul, 0
  ret i1 %cmp
}
69
; The multiply has an extra use (@use64), but the compare still folds to v != 0.
define i1 @mul_reused_unkV_oddC_ne(i64 %v) {
; CHECK-LABEL: @mul_reused_unkV_oddC_ne(
; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[V:%.*]], 3
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[V]], 0
; CHECK-NEXT:    call void @use64(i64 [[MUL]])
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i64 %v, 3
  %cmp = icmp ne i64 %mul, 0
  call void @use64(i64 %mul)
  ret i1 %cmp
}
82
; v2 is assumed odd (low bit set), so (v * v2) == 0 folds to v == 0.
; The input 'icmp eq %lb, 1' is also canonicalized to 'icmp ne %lb, 0'.
define i1 @mul_assumeoddV_unkV_eq(i16 %v, i16 %v2) {
; CHECK-LABEL: @mul_assumeoddV_unkV_eq(
; CHECK-NEXT:    [[LB:%.*]] = and i16 [[V2:%.*]], 1
; CHECK-NEXT:    [[ODD:%.*]] = icmp ne i16 [[LB]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[ODD]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[V:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %lb = and i16 %v2, 1
  %odd = icmp eq i16 %lb, 1
  call void @llvm.assume(i1 %odd)
  %mul = mul i16 %v, %v2
  %cmp = icmp eq i16 %mul, 0
  ret i1 %cmp
}
98
; v is assumed odd and the multiply has another use; the compare still
; folds to v2 != 0 while the multiply is kept for @use64.
define i1 @mul_reusedassumeoddV_unkV_ne(i64 %v, i64 %v2) {
; CHECK-LABEL: @mul_reusedassumeoddV_unkV_ne(
; CHECK-NEXT:    [[LB:%.*]] = and i64 [[V:%.*]], 1
; CHECK-NEXT:    [[ODD:%.*]] = icmp ne i64 [[LB]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[ODD]])
; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[V]], [[V2:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[V2]], 0
; CHECK-NEXT:    call void @use64(i64 [[MUL]])
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %lb = and i64 %v, 1
  %odd = icmp ne i64 %lb, 0
  call void @llvm.assume(i1 %odd)
  %mul = mul i64 %v, %v2
  %cmp = icmp ne i64 %mul, 0
  call void @use64(i64 %mul)
  ret i1 %cmp
}
117
; 'or <1, 1>' forces the first operand odd, so the compare folds to
; v2 != 0 and the now-dead or/mul are removed.
define <2 x i1> @mul_setoddV_unkV_ne(<2 x i32> %v1, <2 x i32> %v2) {
; CHECK-LABEL: @mul_setoddV_unkV_ne(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[V2:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %v = or <2 x i32> %v1, <i32 1, i32 1>
  %mul = mul <2 x i32> %v, %v2
  %cmp = icmp ne <2 x i32> %mul, <i32 0, i32 0>
  ret <2 x i1> %cmp
}
128
; Oddness of v2 is established by the dominating branch condition, so the
; compare in the true block folds to v == 0.
define i1 @mul_broddV_unkV_eq(i16 %v, i16 %v2) {
; CHECK-LABEL: @mul_broddV_unkV_eq(
; CHECK-NEXT:    [[LB:%.*]] = and i16 [[V2:%.*]], 1
; CHECK-NEXT:    [[ODD_NOT:%.*]] = icmp eq i16 [[LB]], 0
; CHECK-NEXT:    br i1 [[ODD_NOT]], label [[FALSE:%.*]], label [[TRUE:%.*]]
; CHECK:       true:
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[V:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
; CHECK:       false:
; CHECK-NEXT:    call void @use64(i16 [[V]])
; CHECK-NEXT:    ret i1 false
;
  %lb = and i16 %v2, 1
  %odd = icmp eq i16 %lb, 1
  br i1 %odd, label %true, label %false
true:
  %mul = mul i16 %v, %v2
  %cmp = icmp eq i16 %mul, 0
  ret i1 %cmp
false:
  call void @use64(i16 %v)
  ret i1 false
}
152
; Multiplying by 4 shifts the top two bits out, so (v * 4) != 0 becomes a
; compare of v masked to its low 62 bits.
define i1 @mul_unkV_evenC_ne(i64 %v) {
; CHECK-LABEL: @mul_unkV_evenC_ne(
; CHECK-NEXT:    [[MUL_MASK:%.*]] = and i64 [[V:%.*]], 4611686018427387903
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL_MASK]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i64 %v, 4
  %cmp = icmp ne i64 %mul, 0
  ret i1 %cmp
}
163
; Negative test: both operands are assumed non-zero, but without nuw/nsw the
; product can still wrap to zero, so no fold happens.
define i1 @mul_assumenzV_asumenzV_eq(i64 %v, i64 %v2) {
; CHECK-LABEL: @mul_assumenzV_asumenzV_eq(
; CHECK-NEXT:    [[NZ:%.*]] = icmp ne i64 [[V:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[NZ]])
; CHECK-NEXT:    [[NZ2:%.*]] = icmp ne i64 [[V2:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[NZ2]])
; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[V]], [[V2]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[MUL]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %nz = icmp ne i64 %v, 0
  call void @llvm.assume(i1 %nz)
  %nz2 = icmp ne i64 %v2, 0
  call void @llvm.assume(i1 %nz2)
  %mul = mul i64 %v, %v2
  %cmp = icmp eq i64 %mul, 0
  ret i1 %cmp
}
182
; With nsw and one operand assumed non-zero, (v * v2) != 0 folds to v2 != 0.
define i1 @mul_assumenzV_unkV_nsw_ne(i32 %v, i32 %v2) {
; CHECK-LABEL: @mul_assumenzV_unkV_nsw_ne(
; CHECK-NEXT:    [[NZ:%.*]] = icmp ne i32 [[V:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[NZ]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[V2:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %nz = icmp ne i32 %v, 0
  call void @llvm.assume(i1 %nz)
  %mul = mul nsw i32 %v, %v2
  %cmp = icmp ne i32 %mul, 0
  ret i1 %cmp
}
196
; Negative test: the non-zero fact is only a select condition, not a
; dominating condition or assume, so the compare is not simplified.
define i1 @mul_selectnzV_unkV_nsw_ne(i8 %v, i8 %v2) {
; CHECK-LABEL: @mul_selectnzV_unkV_nsw_ne(
; CHECK-NEXT:    [[NZ:%.*]] = icmp ne i8 [[V:%.*]], 0
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i8 [[V]], [[V2:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[MUL]], 0
; CHECK-NEXT:    [[R:%.*]] = select i1 [[NZ]], i1 [[CMP]], i1 false
; CHECK-NEXT:    ret i1 [[R]]
;
  %nz = icmp ne i8 %v, 0
  %mul = mul nsw i8 %v, %v2
  %cmp = icmp ne i8 %mul, 0
  %r = select i1 %nz, i1 %cmp, i1 false
  ret i1 %r
}
211
; Negative test: nuw+nsw alone do not prove a non-zero product when neither
; operand is known non-zero; no fold.
define <2 x i1> @mul_unkV_unkV_nsw_nuw_ne(<2 x i16> %v, <2 x i16> %v2) {
; CHECK-LABEL: @mul_unkV_unkV_nsw_nuw_ne(
; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw <2 x i16> [[V:%.*]], [[V2:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i16> [[MUL]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %mul = mul nuw nsw <2 x i16> %v, %v2
  %cmp = icmp ne <2 x i16> %mul, <i16 0, i16 0>
  ret <2 x i1> %cmp
}
222
; 'or 2' makes the first operand non-zero, so with nuw the compare folds to
; v2 == 0 and the dead or/mul are removed.
define i1 @mul_setnzV_unkV_nuw_eq(i8 %v1, i8 %v2) {
; CHECK-LABEL: @mul_setnzV_unkV_nuw_eq(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[V2:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %v = or i8 %v1, 2
  %mul = mul nuw i8 %v, %v2
  %cmp = icmp eq i8 %mul, 0
  ret i1 %cmp
}
233
; Non-zero v2 is established by the dominating branch, so with nuw the
; compare in the true block folds to v == 0.
define i1 @mul_brnzV_unkV_nuw_eq(i64 %v, i64 %v2) {
; CHECK-LABEL: @mul_brnzV_unkV_nuw_eq(
; CHECK-NEXT:    [[NZ_NOT:%.*]] = icmp eq i64 [[V2:%.*]], 0
; CHECK-NEXT:    br i1 [[NZ_NOT]], label [[FALSE:%.*]], label [[TRUE:%.*]]
; CHECK:       true:
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[V:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
; CHECK:       false:
; CHECK-NEXT:    call void @use64(i64 [[V]])
; CHECK-NEXT:    ret i1 false
;
  %nz = icmp ne i64 %v2, 0
  br i1 %nz, label %true, label %false
true:
  %mul = mul nuw i64 %v, %v2
  %cmp = icmp eq i64 %mul, 0
  ret i1 %cmp
false:
  call void @use64(i64 %v)
  ret i1 false
}
255
; (x & -8) > 0 folds to x > 7, removing the mask.
define i1 @test_icmp_sgt_and_negpow2_zero(i32 %add) {
; CHECK-LABEL: @test_icmp_sgt_and_negpow2_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[ADD:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %add, -8
  %cmp = icmp sgt i32 %and, 0
  ret i1 %cmp
}
265
; (x & -8) < 1 folds to x < 8, removing the mask.
define i1 @test_icmp_slt_and_negpow2_one(i32 %add) {
; CHECK-LABEL: @test_icmp_slt_and_negpow2_one(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD:%.*]], 8
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %add, -8
  %cmp = icmp slt i32 %and, 1
  ret i1 %cmp
}
275
; Negative test: RHS constant -2 does not allow the mask to be removed.
define i1 @test_icmp_sgt_and_negpow2_nonzero(i32 %add) {
; CHECK-LABEL: @test_icmp_sgt_and_negpow2_nonzero(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD:%.*]], -8
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[AND]], -2
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %add, -8
  %cmp = icmp sgt i32 %and, -2
  ret i1 %cmp
}
286
; Mask is a non-negative power of two (8), so 'sgt 0' canonicalizes to 'ne 0';
; the mask itself must stay.
define i1 @test_icmp_sgt_and_nonnegpow2_zero(i32 %add) {
; CHECK-LABEL: @test_icmp_sgt_and_nonnegpow2_zero(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD:%.*]], 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %add, 8
  %cmp = icmp sgt i32 %and, 0
  ret i1 %cmp
}
297
; Unsigned variant: (x & -8) u< 1 folds to x u< 8.
define i1 @test_icmp_ult_and_negpow2_one(i32 %add) {
; CHECK-LABEL: @test_icmp_ult_and_negpow2_one(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD:%.*]], 8
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %add, -8
  %cmp = icmp ult i32 %and, 1
  ret i1 %cmp
}
307
; The assume establishes (x & -8) > 0, which implies (x & -8) u< 8 is false.
define i1 @test_imply_dom_condition(i32 %add) {
; CHECK-LABEL: @test_imply_dom_condition(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[ADD:%.*]], 7
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i1 false
;
  %and = and i32 %add, -8
  %cmp = icmp sgt i32 %and, 0
  tail call void @llvm.assume(i1 %cmp)
  %min.iters.check = icmp ult i32 %and, 8
  ret i1 %min.iters.check
}
320
; (x & -32) < 16 folds to x < 32: the RHS rounds up to the mask alignment.
define i1 @test_icmp_slt_and_negpow2_c(i32 %add) {
; CHECK-LABEL: @test_icmp_slt_and_negpow2_c(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD:%.*]], 32
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %add, -32
  %cmp = icmp slt i32 %and, 16
  ret i1 %cmp
}
330
; Negative test: RHS 48 is not a valid constant for the slt fold; mask is kept.
define i1 @test_icmp_slt_and_negpow2_invalid_c(i32 %add) {
; CHECK-LABEL: @test_icmp_slt_and_negpow2_invalid_c(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD:%.*]], -32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[AND]], 48
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %add, -32
  %cmp = icmp slt i32 %and, 48
  ret i1 %cmp
}
341
; (x & -32) > 16 folds to x > 31, removing the mask.
define i1 @test_icmp_sgt_and_negpow2_c(i32 %add) {
; CHECK-LABEL: @test_icmp_sgt_and_negpow2_c(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[ADD:%.*]], 31
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %add, -32
  %cmp = icmp sgt i32 %and, 16
  ret i1 %cmp
}
351
; Negative test: RHS 48 is not a valid constant for the sgt fold; mask is kept.
define i1 @test_icmp_sgt_and_negpow2_invalid_c(i32 %add) {
; CHECK-LABEL: @test_icmp_sgt_and_negpow2_invalid_c(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD:%.*]], -32
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[AND]], 48
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %add, -32
  %cmp = icmp sgt i32 %and, 48
  ret i1 %cmp
}
362