; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; Should fold
;   (%x + %y) u>= %x
; or
;   (%x + %y) u>= %y
; to
;   @llvm.uadd.with.overflow(%x, %y) + extractvalue + not
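;
; Conceptually (a sketch, not the exact IR checked below), that target form is:
;   %agg = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %x, i8 %y)
;   %ov  = extractvalue { i8, i1 } %agg, 1
;   %r   = xor i1 %ov, true
; As the CHECK lines below show, the current canonicalization instead compares
; against the inverted operand: %x u<= ~%y.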

define i1 @t0_basic(i8 %x, i8 %y) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp uge i8 %t0, %y
  ret i1 %r
}

define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @t1_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], splat (i8 -1)
; CHECK-NEXT:    [[R:%.*]] = icmp ule <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %t0 = add <2 x i8> %x, %y
  %r = icmp uge <2 x i8> %t0, %y
  ret <2 x i1> %r
}

; Commutativity

define i1 @t2_symmetry(i8 %x, i8 %y) {
; CHECK-LABEL: @t2_symmetry(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp uge i8 %t0, %x ; can check against either of the `add` arguments
  ret i1 %r
}

declare i8 @gen8()

define i1 @t3_commutative(i8 %x) {
; CHECK-LABEL: @t3_commutative(
; CHECK-NEXT:    [[Y:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y]], -1
; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %y = call i8 @gen8()
  %t0 = add i8 %y, %x ; swapped
  %r = icmp uge i8 %t0, %y
  ret i1 %r
}

define i1 @t4_commutative(i8 %x, i8 %y) {
; CHECK-LABEL: @t4_commutative(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp ule i8 %y, %t0 ; swapped
  ret i1 %r
}

define i1 @t5_commutative(i8 %x) {
; CHECK-LABEL: @t5_commutative(
; CHECK-NEXT:    [[Y:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y]], -1
; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %y = call i8 @gen8()
  %t0 = add i8 %y, %x ; swapped
  %r = icmp ule i8 %y, %t0 ; swapped
  ret i1 %r
}

; Extra-use tests

declare void @use8(i8)

define i1 @t6_extrause(i8 %x, i8 %y) {
; CHECK-LABEL: @t6_extrause(
; CHECK-NEXT:    [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[T0]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  call void @use8(i8 %t0)
  %r = icmp uge i8 %t0, %y
  ret i1 %r
}

; Negative tests

define i1 @n7_different_y(i8 %x, i8 %y0, i8 %y1) {
; CHECK-LABEL: @n7_different_y(
; CHECK-NEXT:    [[T0:%.*]] = add i8 [[X:%.*]], [[Y0:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[T0]], [[Y1:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y0
  %r = icmp uge i8 %t0, %y1
  ret i1 %r
}

define i1 @n8_wrong_pred0(i8 %x, i8 %y) {
; CHECK-LABEL: @n8_wrong_pred0(
; CHECK-NEXT:    [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[T0]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp ule i8 %t0, %y
  ret i1 %r
}

define i1 @n9_wrong_pred1(i8 %x, i8 %y) {
; CHECK-LABEL: @n9_wrong_pred1(
; CHECK-NEXT:    [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[T0]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp ugt i8 %t0, %y
  ret i1 %r
}

define i1 @n10_wrong_pred2(i8 %x, i8 %y) {
; CHECK-LABEL: @n10_wrong_pred2(
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp eq i8 %t0, %y
  ret i1 %r
}

define i1 @n11_wrong_pred3(i8 %x, i8 %y) {
; CHECK-LABEL: @n11_wrong_pred3(
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp ne i8 %t0, %y
  ret i1 %r
}

define i1 @n12_wrong_pred4(i8 %x, i8 %y) {
; CHECK-LABEL: @n12_wrong_pred4(
; CHECK-NEXT:    [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[T0]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp slt i8 %t0, %y
  ret i1 %r
}

define i1 @n13_wrong_pred5(i8 %x, i8 %y) {
; CHECK-LABEL: @n13_wrong_pred5(
; CHECK-NEXT:    [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[T0]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp sle i8 %t0, %y
  ret i1 %r
}

define i1 @n14_wrong_pred6(i8 %x, i8 %y) {
; CHECK-LABEL: @n14_wrong_pred6(
; CHECK-NEXT:    [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[T0]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp sgt i8 %t0, %y
  ret i1 %r
}

define i1 @n15_wrong_pred7(i8 %x, i8 %y) {
; CHECK-LABEL: @n15_wrong_pred7(
; CHECK-NEXT:    [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[T0]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = add i8 %x, %y
  %r = icmp sge i8 %t0, %y
  ret i1 %r
}

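; The remaining tests exercise a related low-bitmask pattern. Judging from the
; CHECK lines below, ((%x + Mask) & Mask) u< %x is expected to fold to %x != 0
; (and the inverted predicates to %x == 0), where Mask is a low-bit mask such
; as 31 or 127.
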
define i1 @low_bitmask_ult(i8 %x) {
; CHECK-LABEL: @low_bitmask_ult(
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %a = add i8 %x, 31
  %m = and i8 %a, 31
  %r = icmp ult i8 %m, %x
  ret i1 %r
}

define <2 x i1> @low_bitmask_uge(<2 x i8> %x) {
; CHECK-LABEL: @low_bitmask_uge(
; CHECK-NEXT:    [[R:%.*]] = icmp eq <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %a = add <2 x i8> %x, <i8 15, i8 poison>
  %m = and <2 x i8> %a, <i8 15, i8 15>
  %r = icmp uge <2 x i8> %m, %x
  ret <2 x i1> %r
}

define i1 @low_bitmask_ugt(i8 %px) {
; CHECK-LABEL: @low_bitmask_ugt(
; CHECK-NEXT:    [[X:%.*]] = mul i8 [[PX:%.*]], [[PX]]
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %x = mul i8 %px, %px
  %a = add i8 %x, 127
  %m = and i8 %a, 127
  %r = icmp ugt i8 %x, %m
  ret i1 %r
}

define <2 x i1> @low_bitmask_ule(<2 x i8> %px) {
; CHECK-LABEL: @low_bitmask_ule(
; CHECK-NEXT:    [[X:%.*]] = mul <2 x i8> [[PX:%.*]], [[PX]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq <2 x i8> [[X]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %x = mul <2 x i8> %px, %px
  %a = add <2 x i8> %x, <i8 3, i8 3>
  %m = and <2 x i8> %a, <i8 3, i8 3>
  %r = icmp ule <2 x i8> %x, %m
  ret <2 x i1> %r
}

define i1 @low_bitmask_ult_use(i8 %x) {
; CHECK-LABEL: @low_bitmask_ult_use(
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], 7
; CHECK-NEXT:    [[M:%.*]] = and i8 [[A]], 7
; CHECK-NEXT:    call void @use8(i8 [[M]])
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %a = add i8 %x, 7
  %m = and i8 %a, 7
  call void @use8(i8 %m)
  %r = icmp ult i8 %m, %x
  ret i1 %r
}

define i1 @low_bitmask_ugt_use(i8 %px) {
; CHECK-LABEL: @low_bitmask_ugt_use(
; CHECK-NEXT:    [[X:%.*]] = mul i8 [[PX:%.*]], [[PX]]
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X]], 3
; CHECK-NEXT:    call void @use8(i8 [[A]])
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %x = mul i8 %px, %px
  %a = add i8 %x, 3
  call void @use8(i8 %a)
  %m = and i8 %a, 3
  %r = icmp ugt i8 %x, %m
  ret i1 %r
}

; negative test - need same low bitmask

define i1 @low_bitmask_ult_wrong_mask1(i8 %x) {
; CHECK-LABEL: @low_bitmask_ult_wrong_mask1(
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], 30
; CHECK-NEXT:    [[M:%.*]] = and i8 [[A]], 31
; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[M]], [[X]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %a = add i8 %x, 30
  %m = and i8 %a, 31
  %r = icmp ult i8 %m, %x
  ret i1 %r
}

; negative test - need same low bitmask

define i1 @low_bitmask_uge_wrong_mask2(i8 %x) {
; CHECK-LABEL: @low_bitmask_uge_wrong_mask2(
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], 31
; CHECK-NEXT:    [[M:%.*]] = and i8 [[A]], 63
; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[M]], [[X]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %a = add i8 %x, 31
  %m = and i8 %a, 63
  %r = icmp uge i8 %m, %x
  ret i1 %r
}

; negative test - predicate mandates operand order

define i1 @low_bitmask_ugt_swapped(i8 %x) {
; CHECK-LABEL: @low_bitmask_ugt_swapped(
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], 127
; CHECK-NEXT:    [[M:%.*]] = and i8 [[A]], 127
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[M]], [[X]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %a = add i8 %x, 127
  %m = and i8 %a, 127
  %r = icmp ugt i8 %m, %x
  ret i1 %r
}

; negative test - unsigned preds only

define i1 @low_bitmask_sgt(i8 %px) {
; CHECK-LABEL: @low_bitmask_sgt(
; CHECK-NEXT:    [[X:%.*]] = mul i8 [[PX:%.*]], [[PX]]
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X]], 127
; CHECK-NEXT:    [[M:%.*]] = and i8 [[A]], 127
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[X]], [[M]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %x = mul i8 %px, %px
  %a = add i8 %x, 127
  %m = and i8 %a, 127
  %r = icmp sgt i8 %x, %m
  ret i1 %r
}

; negative test - specific operand must match

define i1 @low_bitmask_ult_specific_op(i8 %x, i8 %y) {
; CHECK-LABEL: @low_bitmask_ult_specific_op(
; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], 31
; CHECK-NEXT:    [[M:%.*]] = and i8 [[A]], 31
; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[M]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %a = add i8 %x, 31
  %m = and i8 %a, 31
  %r = icmp ult i8 %m, %y
  ret i1 %r
}
